Dataset columns (types and observed value ranges):

  repo_name           string   length 7-71
  file_path           string   length 5-118
  context             list
  import_statement    string   length 45-12.5k
  token_num           int64    641-99.4k
  cropped_code        string   length 44-17k
  all_code            string   length 43-754k
  next_line           string   length 2-330
  gold_snippet_index  int64    0-68
  created_at          string   length 25
  level               string   9 classes
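Each record in this schema is one code-completion example. A minimal sketch of loading and inspecting such a record with the Hugging Face `datasets` library follows; the repository id `ORG/DATASET_NAME` and the `train` split name are placeholders, since this excerpt does not name the dataset.

from datasets import load_dataset

# "ORG/DATASET_NAME" is a placeholder - substitute the actual Hub repository id.
ds = load_dataset("ORG/DATASET_NAME", split="train")  # split name is an assumption
record = ds[0]

# Scalar / short fields
print(record["repo_name"])           # e.g. "cyberark/ark-sdk-python"
print(record["file_path"])           # path of the file being completed
print(record["token_num"])           # token count reported for the example
print(record["gold_snippet_index"])  # index of the gold snippet inside `context`
print(record["next_line"])           # ground-truth next line

# `context` is a list of {"identifier", "path", "snippet"} dictionaries
for item in record["context"][:3]:
    print(item["identifier"], "->", item["path"])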
repo_name: cyberark/ark-sdk-python
file_path: ark_sdk_python/cli_services/dpa/vm/ark_dpa_vm_policies_editor_service.py
[ { "identifier": "ArkInquirerRender", "path": "ark_sdk_python/args/ark_args_formatter.py", "snippet": "class ArkInquirerRender(ConsoleRender):\n # pylint: disable=keyword-arg-before-vararg,protected-access\n def __init__(self, event_generator=None, *args, **kwargs):\n super().__init__(event_generator=event_generator, theme=ARK_INQUIRER_THEME, *args, **kwargs)\n\n def render(self, question, answers=None):\n question.answers = answers or {}\n\n if question.ignore:\n return question.default\n\n clazz = self.render_factory(question.kind)\n render = clazz(question, terminal=self.terminal, theme=self._theme, show_default=question.show_default)\n if isinstance(\n render, (inquirer.render.console._text.Text, inquirer.render.console._password.Password, inquirer.render.console._path.Path)\n ):\n render.current = ''\n self.clear_eos()\n\n try:\n a = self._event_loop(render)\n if not a and question.default:\n a = question.default\n elif not a and question.name in answers:\n a = answers[question.name]\n return a\n finally:\n print('')\n\n def _print_header(self, render):\n base = render.get_header()\n\n header = base[: self.width - 9] + '...' if len(base) > self.width - 6 else base\n default_value = '{normal} ({default})'.format(default=render.question.default, normal=self.terminal.normal)\n show_default = render.question.default and render.show_default\n header += default_value if show_default else ''\n msg_template = '{t.move_up}{t.clear_eol}{tq.brackets_color}{tq.mark_color}?{tq.brackets_color} {msg}{t.normal}'\n\n escaped_current_value = str(render.get_current_value()).replace('{', '{{').replace('}', '}}')\n self.print_str(\n f'\\n{msg_template} {escaped_current_value}',\n msg=header,\n lf=not render.title_inline,\n tq=self._theme.Question,\n )" }, { "identifier": "ArkISPAuth", "path": "ark_sdk_python/auth/ark_isp_auth.py", "snippet": "class ArkISPAuth(ArkAuth):\n def __perform_identity_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret], force: bool\n ) -> ArkToken:\n try:\n method_settings = cast(IdentityArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentity(\n username=auth_profile.username,\n password=secret.secret.get_secret_value() if secret else None,\n identity_url=method_settings.identity_url,\n mfa_type=method_settings.identity_mfa_method,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n )\n identity.auth_identity(profile, ArkSystemConfig.is_interactive() and method_settings.identity_mfa_interactive, force)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n token_lifetime = identity.session_details.token_lifetime\n if not token_lifetime:\n token_lifetime = DEFAULT_TOKEN_LIFETIME\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.Identity,\n expires_in=datetime.now() + timedelta(seconds=token_lifetime),\n refresh_token=identity.session_details.refresh_token,\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n self._logger.exception(f'Failed to authenticate to identity security platform [{str(ex)}]')\n raise ArkAuthException from ex\n\n def __perform_identity_refresh_authentication(self, profile: ArkProfile, auth_profile: 
ArkAuthProfile, token: ArkToken) -> ArkToken:\n try:\n method_settings = cast(IdentityArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentity(\n username=auth_profile.username,\n password=None,\n identity_url=method_settings.identity_url,\n mfa_type=method_settings.identity_mfa_method,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n load_cache=True,\n cache_profile=profile,\n )\n identity.refresh_auth_identity(profile, method_settings.identity_mfa_interactive, False)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n token_lifetime = identity.session_details.token_lifetime\n if not token_lifetime:\n token_lifetime = DEFAULT_TOKEN_LIFETIME\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.Identity,\n expires_in=datetime.now() + timedelta(seconds=token_lifetime),\n refresh_token=identity.session_details.refresh_token,\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n raise ArkAuthException('Failed to authenticate to isp via identity') from ex\n\n def __perform_identity_service_user_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret], force: bool\n ) -> ArkToken:\n try:\n if not secret:\n raise ArkException('Token secret is required for identity service user auth')\n method_settings = cast(IdentityServiceUserArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentityServiceUser(\n username=auth_profile.username,\n token=secret.secret.get_secret_value(),\n app_name=method_settings.identity_authorization_application,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n )\n identity.auth_identity(profile, force)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.IdentityServiceUser,\n expires_in=datetime.now() + timedelta(hours=4),\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n self._logger.exception(f'Failed to authenticate to identity security platform with service user [{str(ex)}]')\n raise ArkAuthException from ex\n\n @overrides\n def _perform_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret] = None, force: bool = False\n ) -> ArkToken:\n \"\"\"\n Performs authentication to the identity security platform identity tenant\n Authentication can be done with either a service user or a normal user\n Authentication Methods:\n - Identity, Default\n - IdentityServiceUser\n\n Args:\n profile (ArkProfile): _description_\n auth_profile (ArkAuthProfile): _description_\n secret (Optional[ArkSecret], optional): _description_. Defaults to None.\n force (bool, optional): _description_. 
Defaults to False.\n\n Raises:\n ArkAuthException: _description_\n\n Returns:\n ArkToken: _description_\n \"\"\"\n self._logger.info('Performing authentication to ISP')\n if auth_profile.auth_method in [ArkAuthMethod.Identity, ArkAuthMethod.Default]:\n return self.__perform_identity_authentication(profile, auth_profile, secret, force)\n if auth_profile.auth_method == ArkAuthMethod.IdentityServiceUser:\n return self.__perform_identity_service_user_authentication(profile, auth_profile, secret, force)\n raise ArkAuthException('Given auth method is not supported')\n\n @overrides\n def _perform_refresh_authentication(self, profile: ArkProfile, auth_profile: ArkAuthProfile, token: ArkToken) -> ArkToken:\n \"\"\"\n Refresh for isp tenant is supported only for identity\n\n Args:\n profile (ArkProfile): _description_\n auth_profile (ArkAuthProfile): _description_\n token (ArkToken): _description_\n\n Returns:\n ArkToken: _description_\n \"\"\"\n self._logger.info('Performing refresh authentication to ISP')\n if auth_profile.auth_method in [ArkAuthMethod.Identity, ArkAuthMethod.Default]:\n return self.__perform_identity_refresh_authentication(profile, auth_profile, token)\n return token\n\n @staticmethod\n @overrides\n def authenticator_name() -> str:\n return AUTH_NAME\n\n @staticmethod\n @overrides\n def authenticator_human_readable_name() -> str:\n return AUTH_HUMAN_READABLE_NAME\n\n @staticmethod\n @overrides\n def supported_auth_methods() -> List[ArkAuthMethod]:\n return AUTH_METHODS\n\n @staticmethod\n @overrides\n def default_auth_method() -> Tuple[ArkAuthMethod, ArkAuthMethodSettings]:\n return DEFAULT_AUTH_METHOD, DEFAULT_AUTH_METHOD_SETTINGS" }, { "identifier": "ArkDPABasePoliciesEditorService", "path": "ark_sdk_python/cli_services/dpa/common/ark_dpa_base_policies_editor_service.py", "snippet": "class ArkDPABasePoliciesEditorService(\n ArkService, ABC, Generic[PolicyType, PolicyListItemType, AddPolicyType, UpdatePolicyType, GeneratePolicyType]\n):\n def __init__(\n self,\n policy_type: PolicyType,\n add_policy_type: AddPolicyType,\n update_policy_type: UpdatePolicyType,\n isp_auth: ArkISPAuth,\n policies_family: str,\n tenant_id: str,\n policies_cache_dir: Optional[str] = None,\n profile: Optional[ArkProfile] = None,\n ) -> None:\n super().__init__(isp_auth)\n profile = profile or ArkProfileLoader.load_default_profile()\n self._policies_family = policies_family\n self.__policies_cache_dir = Path(policies_cache_dir or Path.home() / '.ark_cache' / 'profiles' / profile.profile_name / tenant_id)\n if not policies_cache_dir and 'ARK_DPA_POLICIES_EDITOR_FOLDER' in os.environ:\n self.__policies_cache_dir = Path(os.environ['ARK_DPA_POLICIES_EDITOR_FOLDER'])\n self.__policies_cache_dir = self.__policies_cache_dir / policies_family\n self.__policies_cache_dir.mkdir(exist_ok=True, parents=True)\n self.__policy_type = policy_type\n self.__add_policy_type = add_policy_type\n self.__update_policy_type = update_policy_type\n\n @abstractmethod\n def _policy(self, get_policy: ArkDPAGetPolicy) -> PolicyType:\n pass\n\n @abstractmethod\n def _list_policies(self) -> List[PolicyListItemType]:\n pass\n\n @abstractmethod\n def _add_policy(self, add_policy: AddPolicyType) -> PolicyType:\n pass\n\n @abstractmethod\n def _update_policy(self, update_policy: UpdatePolicyType) -> PolicyType:\n pass\n\n @abstractmethod\n def _delete_policy(self, delete_policy: ArkDPADeletePolicy) -> None:\n pass\n\n @abstractmethod\n def _generate_policy(self, generate_policy: GeneratePolicyType, workspace_policies: List[PolicyType]) 
-> PolicyType:\n pass\n\n def __load_policy_diff(self, workspace_policy: PolicyType) -> Optional[Tuple[PolicyType, PolicyType]]:\n remote_policy = self._policy(ArkDPAGetPolicy(policy_id=str(workspace_policy.policy_id)))\n if remote_policy != workspace_policy:\n return (workspace_policy, remote_policy)\n return None\n\n def __load_policies_diff(self) -> Dict[str, Tuple[PolicyType, PolicyType]]:\n workspace_policies = self.__load_existing_policies_from_workspace()\n with ThreadPoolExecutor() as executor:\n remote_policies = {\n p[0].policy_name: p for p in executor.map(self.__load_policy_diff, workspace_policies.values()) if p is not None\n }\n return remote_policies\n\n def __load_policies_from_workspace_by_suffix(self, suffix: str = '') -> Dict[str, PolicyType]:\n p = Path(self.__policies_cache_dir).glob(f'*.json{suffix}')\n policies_files = [x for x in p if x.is_file() and x.suffix == suffix or '.json']\n policies = {}\n for f in policies_files:\n policy = self.__policy_type.parse_file(f)\n policies[policy.policy_name] = policy\n return policies\n\n def __load_removed_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix('.removed')\n\n def __load_generated_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix('.generated')\n\n def __load_existing_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix()\n\n def __load_policy_to_workspace(self, policy: PolicyListItemType, override: bool) -> Optional[PolicyType]:\n policy_data = self._policy(ArkDPAGetPolicy(policy_id=policy.policy_id))\n policy_path = Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json')\n if policy_path.exists():\n existing_data = self.__policy_type.parse_raw(policy_path.read_text())\n if existing_data != policy_data:\n if not override:\n return policy_data\n if not policy_data.policy_id:\n policy_data.policy_id = policy.policy_id\n policy_path.write_text(policy_data.json(indent=4))\n (Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json.removed')).unlink(missing_ok=True)\n\n def load_policies(self, load_policies: ArkDPALoadPolicies) -> ArkDPALoadedPolicies:\n \"\"\"\n Loads all remote policies into the local workspace.\n The user is asked whether to overwrite existing policies that were edited either locally or remotely.\n When default overwrite is enabled, existing policies are overwritten without prompts.\n\n Args:\n load_policies (ArkDPALoadPolicies): _description_\n\n Returns:\n ArkDPALoadedPolicies: _description_\n \"\"\"\n policies = self._list_policies()\n policies_to_query: Dict[str, PolicyType] = []\n with ThreadPoolExecutor() as executor:\n policies_to_query = {\n p.policy_name: p\n for p in executor.map(lambda p: self.__load_policy_to_workspace(p, load_policies.override), policies)\n if p is not None\n }\n # Build the query editor to ask the user\n policies_to_override = []\n if policies_to_query:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'override',\n message=f'Conflicts detected, please choose if you wish to override local {self._policies_family} policies or leave them as is',\n choices=[p.policy_name for p in policies_to_query.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policies_to_override = answers['override']\n for policy_name in policies_to_override:\n policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')\n if policy_path.exists() and 
policy_name in policies_to_query:\n policy_path.write_text(policies_to_query[policy_name].json(indent=4))\n return ArkDPALoadedPolicies(\n loaded_path=str(self.__policies_cache_dir),\n overall_policies_count=len(policies),\n loaded_policies_count=len(policies) - len(policies_to_query),\n overriden_policies_count=len(policies_to_override),\n untouched_policies_count=len(policies_to_query) - len(policies_to_override),\n )\n\n def edit_policies(self, edit_policies: ArkDPAEditPolicies) -> None:\n \"\"\"\n Edits the set of specified policies one at a time, either via the CLI or the default OS editor.\n Edited policies are only saved locally until they are committed.\n\n Args:\n edit_policies (ArkDPAEditPolicies): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n if not workspace_policies:\n raise ArkServiceException(\n f'No {self._policies_family} policies to edit in the workspace, please load the policies or generate a new one'\n )\n policy_names = edit_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to edit?, press space to select',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n try:\n answers = inquirer.prompt(\n [\n inquirer.Editor(f'{name}_edit', message=f'Chosen {self._policies_family} policy [{name}] is about to be edited')\n for name in policy_names\n ],\n render=ArkInquirerRender(),\n answers={f'{name}_edit': workspace_policies[name].json(indent=4) for name in policy_names},\n )\n for name in policy_names:\n policy = self.__policy_type.parse_raw(answers[f'{name}_edit'])\n for path in [\n Path(self.__policies_cache_dir) / (name + '.json'),\n Path(self.__policies_cache_dir) / (name + '.json.generated'),\n ]:\n if path.exists():\n path.write_text(policy.json(indent=4))\n break\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to edit {self._policies_family} policies, '\n f'you can edit the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def remove_policies(self, remove_policies: ArkDPARemovePolicies) -> None:\n \"\"\"\n Removes one or more policies from the local workspace.\n Until changes are committed, removing a remote policy only appends the `.deleted` indication to its name.\n After committing the changes, the policies are deleted both locally and remotely.\n New, uncommitted policies are deleted locally after the user consents.\n\n Args:\n remove_policies (ArkDPARemovePolicies): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n if not workspace_policies:\n raise ArkServiceException(\n f'No {self._policies_family} policies to remove in the workspace, please load the policies or generate a new one'\n )\n policy_names = remove_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to remove?, press space to select',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = 
answers['names']\n for policy_name in policy_names:\n for path in [\n Path(self.__policies_cache_dir) / (policy_name + '.json'),\n Path(self.__policies_cache_dir) / (policy_name + '.json.generated'),\n ]:\n if path.exists():\n if path.suffix == '.json':\n path.rename(Path(self.__policies_cache_dir) / (policy_name + '.json.removed'))\n else:\n answers = inquirer.prompt(\n [\n inquirer.Confirm(\n 'remove',\n message=f'Are you sure you want to remove local {self._policies_family} policy [{policy_name}]?, removing an uncommitted local policy cannot be reverted',\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n if answers['remove']:\n path.unlink(missing_ok=True)\n\n def view_policies(self, view_policies: ArkDPAViewPolicies) -> None:\n \"\"\"\n Allows the user to view one or more policies either together or individually, as defined in the CLI user prompt.\n Policies are viewed in the machine's default editor (both existing policies and newly generated policies).\n\n Args:\n view_policies (ArkDPAViewPolicies): _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n policy_names = view_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to view?',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n if not policy_names:\n return\n try:\n if view_policies.unified:\n inquirer.prompt(\n [inquirer.Editor('views', f'Show all selected {self._policies_family} policies')],\n answers={\n 'views': '\\n\\n\\n'.join(\n [f'# Policy [{policy_name}]\\n{workspace_policies[policy_name].json(indent=4)}' for policy_name in policy_names]\n )\n },\n render=ArkInquirerRender(),\n )\n else:\n inquirer.prompt(\n [inquirer.Editor(f'{policy_name}_view', f'Show [{policy_name}]') for policy_name in policy_names],\n render=ArkInquirerRender(),\n answers={f'{policy_name}_view': workspace_policies[policy_name].json(indent=4) for policy_name in policy_names},\n )\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to view the {self._policies_family} policies, '\n f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def reset_policies(self, reset_policy: ArkDPAResetPolicies) -> None:\n \"\"\"\n Resets local workspace policies.\n When all policies are reset, all local policies are overwritten and deleted policies are removed.\n Otherwise, the user can select which policies are reset.\n This function does not alter newly generated uncommitted policies.\n\n Args:\n reset_policy (ArkDPAResetPolicies): _description_\n \"\"\"\n if reset_policy.all:\n answers = inquirer.prompt(\n [inquirer.Confirm('reset', message=f'Are you sure you want to reset all edited {self._policies_family} policies?')]\n )\n if not answers:\n return\n if answers['reset']:\n self.load_policies(ArkDPALoadPolicies(override=True))\n else:\n policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n if not policies_diff and not removed_policies:\n return\n policy_names = reset_policy.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to reset?, press space to select',\n choices=[p for p in 
policies_diff.keys() + removed_policies.keys()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n policy_names = [p for p in policy_names if p in policies_diff or p in removed_policies]\n for policy_name in policy_names:\n policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')\n if policy_name in policies_diff:\n policy_path.write_text(policies_diff[policy_name][1].json(indent=4))\n elif policy_name in removed_policies:\n policy_path.write_text(removed_policies[policy_name].json(indent=4))\n (Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)\n\n def generate_policy(self, generate_policy: GeneratePolicyType) -> None:\n \"\"\"\n Generates a new policy from a template and the user's parameters.\n The user is prompted for the parameters when they are not specified in the CLI.\n After policy's parameters are defined, the policy is generates in memory and can bee edited.\n The new policy is saved locally until it is committed.\n\n Args:\n generate_policy (GeneratePolicyType): _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n policy = self._generate_policy(generate_policy, workspace_policies)\n policy_path = Path(self.__policies_cache_dir) / (policy.policy_name + '.json.generated')\n # Let the user edit the generated policy\n if not generate_policy.disable_edit:\n try:\n answers = inquirer.prompt(\n [\n inquirer.Editor(\n 'policy_editor',\n f'Newly {self._policies_family} policy is generated and ready to be edited, once edited, it will be saved to the local workspace',\n )\n ],\n render=ArkInquirerRender(),\n answers={'policy_editor': policy.json(indent=4, exclude_none=True)},\n )\n if not answers:\n return\n policy = self.__policy_type.parse_raw(answers['policy_editor'])\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to edit the {self._policies_family} policy, '\n f'the policy will be saved to [{policy_path}] and can be edited manually [{str(ex)}]'\n )\n policy_path.write_text(policy.json(indent=4))\n\n def policies_diff(self, policies_diff: ArkDPAPoliciesDiff) -> None:\n \"\"\"\n Calculates the diff between the local workspace and remote policies.\n This diff includes uncommitted removed policies. 
A unified or per policy diff can be displayed.\n\n Args:\n policies_diff (ArkDPAPoliciesDiff): _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n if not loaded_policies_diff and not removed_policies:\n return\n if policies_diff.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in policies_diff.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in policies_diff.names}\n if not loaded_policies_diff and not removed_policies:\n return\n diffs = {\n policy_name: difflib.unified_diff(\n policy_tuple[1].json(indent=4).splitlines(True),\n policy_tuple[0].json(indent=4).splitlines(True),\n fromfile=f'local policy [{policy_name}]',\n tofile=f'remote policy [{policy_name}]',\n n=MAX_LINE_DIFF,\n )\n for policy_name, policy_tuple in loaded_policies_diff.items()\n }\n diffs.update(\n {\n policy_name: difflib.unified_diff(\n policy.json(indent=4).splitlines(True),\n '',\n fromfile=f'local policy [{policy_name}]',\n tofile=f'remote policy [{policy_name}]',\n n=MAX_LINE_DIFF,\n )\n for policy_name, policy in removed_policies.items()\n }\n )\n try:\n if policies_diff.unified:\n inquirer.prompt(\n [inquirer.Editor('diffs', 'Show all diffs')],\n render=ArkInquirerRender(),\n answers={'diffs': '\\n\\n\\n'.join([''.join(d) for d in diffs.values()])},\n )\n else:\n inquirer.prompt(\n [inquirer.Editor(f'{policy_name}_diff', f'Show [{policy_name}] diff') for policy_name in diffs.keys()],\n render=ArkInquirerRender(),\n answers={f'{policy_name}_diff': ''.join(policy_diffs) for policy_name, policy_diffs in diffs.items()},\n )\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to show {self._policies_family} policies diff, '\n f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def policies_status(self, get_policies_status: ArkDPAGetPoliciesStatus) -> ArkDPAPoliciesStatus:\n \"\"\"\n Gets the status of locally altered policies.\n\n Args:\n get_policies_status (ArkDPAGetPoliciesStatus): _description_\n\n Returns:\n ArkDPAPoliciesStatus: _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n generated_policies = self.__load_generated_policies_from_workspace()\n if get_policies_status.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in get_policies_status.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in get_policies_status.names}\n generated_policies = {k: v for k, v in generated_policies.items() if k in get_policies_status.names}\n return ArkDPAPoliciesStatus(\n modified_policies=list(loaded_policies_diff.keys()),\n removed_policies=list(removed_policies.keys()),\n added_policies=list(generated_policies.keys()),\n )\n\n def commit_policies(self, commit_policies: ArkDPACommitPolicies) -> None:\n \"\"\"\n Commits policies.\n The function first calculates the differences between the local and remote policies to find out which policies were edited, including\n the policies selected for deletion and new, uncommitted policies. 
It also\n allows selecting whether to commit all the edited policies or only specific policies by name.\n\n After all policies are committed, the workspace is reorganized accordingly.\n\n Args:\n commit_policies (ArkDPACommitPolicies): _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n generated_policies = self.__load_generated_policies_from_workspace()\n if not loaded_policies_diff and not removed_policies and not generated_policies:\n return\n if commit_policies.all:\n answers = inquirer.prompt(\n [inquirer.Confirm('reset', message=f'Are you sure you want to commit all edited {self._policies_family} policies?')]\n )\n if not answers or not answers['reset']:\n return\n else:\n if commit_policies.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in commit_policies.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in commit_policies.names}\n generated_policies = {k: v for k, v in generated_policies.items() if k in commit_policies.names}\n else:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to commit?, press space to select',\n choices=list(loaded_policies_diff.keys()) + list(removed_policies.keys()) + list(generated_policies.keys()),\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in answers['names']}\n removed_policies = {k: v for k, v in removed_policies.items() if k in answers['names']}\n generated_policies = {k: v for k, v in generated_policies.items() if k in answers['names']}\n if not loaded_policies_diff and not removed_policies and not generated_policies:\n return\n with ThreadPoolExecutor() as executor:\n added = executor.map(lambda p: self._add_policy(self.__add_policy_type(**p.dict())), generated_policies.values())\n updated = executor.map(lambda p: self._update_policy(self.__update_policy_type(**p[0].dict())), loaded_policies_diff.values())\n deleted = executor.map(\n lambda p: self._delete_policy(ArkDPADeletePolicy(policy_id=p.policy_id, policy_name=p.policy_name)),\n removed_policies.values(),\n )\n # Loop for exception checking\n added_policies = list(added)\n for _ in itertools.chain(updated, deleted):\n pass\n for policy_name in removed_policies.keys():\n (Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)\n for policy_name in generated_policies.keys():\n for policy in added_policies:\n if policy.policy_name == policy_name:\n (Path(self.__policies_cache_dir) / (policy_name + '.json.generated')).rename(\n (Path(self.__policies_cache_dir) / (policy_name + '.json'))\n )\n (Path(self.__policies_cache_dir) / (policy_name + '.json')).write_text(policy.json(indent=4))" }, { "identifier": "ArkProfile", "path": "ark_sdk_python/models/ark_profile.py", "snippet": "class ArkProfile(ArkModel):\n profile_name: str = Field(default='ark', alias='Profile Name', description='Profile name for storage')\n profile_description: str = Field(default='Default Ark Profile', alias='Profile Description', description='Info about the profile')\n auth_profiles: Dict[str, ArkAuthProfile] = Field(\n description='Authentication profiles configurations, map from name of the authenticator to its profile', default_factory=dict\n )\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('auth_profiles', pre=True)\n def 
validate_auth_profiles(cls, val):\n auth_profiles = {}\n for k, v in val.items():\n auth_profile = ArkAuthProfile.parse_obj(v)\n # Make sure that the settings are parsed with the correct class\n # Due to properties overlapping\n if 'auth_method_settings' in v:\n auth_profile.auth_method_settings = ArkAuthMethodSettingsMap[auth_profile.auth_method].parse_obj(v['auth_method_settings'])\n auth_profiles[k] = auth_profile\n return auth_profiles" }, { "identifier": "ArkDPAVMGeneratePolicy", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/vm/ark_dpa_vm_generate_policy.py", "snippet": "class ArkDPAVMGeneratePolicy(ArkDPABaseGeneratePolicy):\n providers: Optional[Set[Literal['AWS', 'Azure', 'OnPrem']]] = Field(description='Providers to generate the policy for')\n protocols: Optional[Set[Literal['ssh', 'rdp']]] = Field(description='Protocols to generate the policy for')" }, { "identifier": "ArkProtocolType", "path": "ark_sdk_python/models/common/ark_protocol_type.py", "snippet": "class ArkProtocolType(str, MultiValueEnum):\n SSH = 'ssh', 'SSH'\n SCP = 'scp', 'SCP'\n SFTP = 'sftp', 'SFTP'\n RDP = 'rdp', 'RDP'\n CLI = 'cli', 'CLI'\n CONSOLE = 'console', 'Console'\n HTTPS = 'https', 'HTTPS'\n K8S = 'K8S', 'k8s'\n DB = 'Database', 'database', 'DATABASE'" }, { "identifier": "ArkWorkspaceType", "path": "ark_sdk_python/models/common/ark_workspace_type.py", "snippet": "class ArkWorkspaceType(str, MultiValueEnum):\n AWS = 'aws', 'AWS', 'Aws'\n AZURE = 'azure', 'AZURE', 'Azure'\n ONPREM = 'onprem', 'ON-PREMISE', 'OnPrem'\n DB = 'db', 'DATABASES', 'Databases'\n GCP = 'gcp', 'GCP'\n MYSQL = 'mysql', 'MySQL'\n MARIADB = 'mariadb', 'MariaDB'\n MSSQL = 'mssql', 'MSSQL'\n ORACLE = 'oracle', 'Oracle'\n POSTGRES = 'postgres', 'Postgres'\n FAULT = 'fault', 'FAULT'\n UNKNOWN = 'unknown', 'UNKNOWN', 'Unknown'" }, { "identifier": "ArkServiceConfig", "path": "ark_sdk_python/models/services/ark_service_config.py", "snippet": "class ArkServiceConfig(ArkModel):\n service_name: str = Field(description='Name of the service')\n required_authenticator_names: List[str] = Field(description='Required authenticators for the service to properly work')\n optional_authenticator_names: List[str] = Field(\n description='Optional authenticators for the service for extra capabilities', default_factory=list\n )" }, { "identifier": "ArkDPADeletePolicy", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_delete_policy.py", "snippet": "class ArkDPADeletePolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to delete')\n policy_name: Optional[str] = Field(description='Policy name to delete')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values" }, { "identifier": "ArkDPAGetPolicy", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_get_policy.py", "snippet": "class ArkDPAGetPolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to get')\n policy_name: Optional[str] = Field(description='Policy name to get')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values" }, { "identifier": "ArkDPARuleStatus", "path": 
"ark_sdk_python/models/services/dpa/policies/common/ark_dpa_rule_status.py", "snippet": "class ArkDPARuleStatus(str, Enum):\n Enabled = 'Enabled'\n Disabled = 'Disabled'\n Draft = 'Draft'\n Expired = 'Expired'" }, { "identifier": "ArkDPAUserData", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_user_data.py", "snippet": "class ArkDPAUserData(ArkCamelizedModel):\n roles: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Roles allowed for auth rule', default_factory=list)\n groups: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Groups allowed for auth rule', default_factory=list)\n users: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Users allowed for auth rule', default_factory=list)" }, { "identifier": "ArkDPAVMAddPolicy", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_add_policy.py", "snippet": "class ArkDPAVMAddPolicy(ArkDPABaseAddPolicy):\n providers_data: Optional[ArkDPAVMProvidersDict] = Field(\n description='Workspaces / cloud providers data per type of cloud provider, '\n 'for example for AWS, how to filter ec2 instances to connect to'\n )\n user_access_rules: Optional[List[ArkDPAVMAuthorizationRule]] = Field(\n description='Rules describing how and who will be able to connect to the target instances filtered by the cloud providers'\n )\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers_data', pre=True)\n def validate_providers_data(cls, val):\n if val is not None:\n for k in val.keys():\n val[k]['providerName'] = serialize_dpa_vm_policies_workspace_type(ArkWorkspaceType(k))\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMAuthorizationRule", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_authorization_rule.py", "snippet": "class ArkDPAVMAuthorizationRule(ArkDPABaseAuthorizationRule):\n connection_information: ArkDPAVMConnectionInformation = Field(description='Rule information on how access is made')" }, { "identifier": "ArkDPAVMConnectionInformation", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_authorization_rule.py", "snippet": "class ArkDPAVMConnectionInformation(ArkDPABaseConnectionInformation):\n connect_as: ArkDPAVMProvidersConnectionDict = Field(description='In which fashion the connection is made')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('connect_as')\n def validate_connect_as(cls, val):\n for k, v in val.items():\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n for k2 in v.keys():\n if ArkProtocolType(k2) not in [\n ArkProtocolType.SSH,\n ArkProtocolType.RDP,\n ArkProtocolType.SFTP,\n ArkProtocolType.SCP,\n ArkProtocolType.HTTPS,\n ]:\n raise ValueError('Invalid connection type')\n return val" }, { "identifier": "ArkDPAVMConnectionDataType", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_connection_data.py", "snippet": "class ArkDPAVMConnectionMethodData(ArkCamelizedModel):\nclass ArkDPAVMLocalEphemeralUserConnectionMethodData(ArkDPAVMConnectionMethodData):\nclass ArkDPAVMRDPLocalEphemeralUserConnectionData(ArkCamelizedModel):" }, { "identifier": "ArkDPAVMPolicy", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_policy.py", 
"snippet": "class ArkDPAVMPolicy(ArkDPABasePolicy):\n providers_data: Optional[ArkDPAVMProvidersDict] = Field(description='Cloud providers info of the policy')\n user_access_rules: Optional[List[ArkDPAVMAuthorizationRule]] = Field(description='Authorization rules of the policy')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers_data', pre=True)\n def validate_providers_data(cls, val):\n if val is not None:\n for k in val.keys():\n val[k]['providerName'] = serialize_dpa_vm_policies_workspace_type(ArkWorkspaceType(k))\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMPolicyListItem", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_policy_list_item.py", "snippet": "class ArkDPAVMPolicyListItem(ArkDPABasePolicyListItem):\n platforms: Optional[List[ArkWorkspaceType]] = Field(description='Names of the platforms of the policy')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('platforms')\n def validate_platforms(cls, val):\n if val is not None:\n for plat in val:\n if ArkWorkspaceType(plat) not in [\n ArkWorkspaceType.AWS,\n ArkWorkspaceType.AZURE,\n ArkWorkspaceType.GCP,\n ArkWorkspaceType.ONPREM,\n ]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMAWSProviderData", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_providers.py", "snippet": "class ArkDPAVMAWSProviderData(ArkCamelizedModel):\nclass ArkDPAVMAzureProviderData(ArkCamelizedModel):\nclass ArkDPAVMGCPProviderData(ArkCamelizedModel):\nclass ArkDPAVMFQDNRulesConjunction(str, Enum):\nclass ArkDPAVMFQDNOperator(str, Enum):\nclass ArkDPAVMFQDNRule(ArkCamelizedModel):\nclass ArkDPAVMOnPremProviderData(ArkCamelizedModel):\n AND = 'AND'\n OR = 'OR'\n EXACTLY = 'EXACTLY'\n WILDCARD = 'WILDCARD'\n PREFIX = 'PREFIX'\n SUFFIX = 'SUFFIX'\n CONTAINS = 'CONTAINS'" }, { "identifier": "ArkDPAVMUpdatePolicy", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_update_policy.py", "snippet": "class ArkDPAVMUpdatePolicy(ArkDPABaseUpdatePolicy):\n providers_data: Optional[ArkDPAVMProvidersDict] = Field(description='New cloud providers to update')\n user_access_rules: Optional[List[ArkDPAVMAuthorizationRule]] = Field(description='New access rules to update')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers_data', pre=True)\n def validate_providers_data(cls, val):\n if val is not None:\n for k in val.keys():\n val[k]['providerName'] = serialize_dpa_vm_policies_workspace_type(ArkWorkspaceType(k))\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMPoliciesService", "path": "ark_sdk_python/services/dpa/policies/vm/ark_dpa_vm_policies_service.py", "snippet": "class ArkDPAVMPoliciesService(ArkService):\n def __init__(self, isp_auth: ArkISPAuth) -> None:\n super().__init__(isp_auth)\n self.__isp_auth = isp_auth\n self.__client: ArkISPServiceClient = ArkISPServiceClient.from_isp_auth(self.__isp_auth, 'dpa')\n\n @property\n def isp_client(self) -> ArkISPServiceClient:\n return self.__client\n\n def __policy_id_by_name(self, policy_name: str) -> str:\n policies = self.list_policies_by(ArkDPAVMPoliciesFilter(name=policy_name))\n if not policies:\n raise 
ArkServiceException(f'Failed to find vm policy id by name [{policy_name}]')\n return policies[0].policy_id\n\n @staticmethod\n def __serialize_providers_dict(providers_data: ArkDPAVMProvidersDict) -> Dict:\n serialized_providers_data = {}\n for k in list(providers_data.keys()):\n serialized_providers_data[serialize_dpa_vm_policies_workspace_type(k)] = providers_data[k].dict(by_alias=True)\n return serialized_providers_data\n\n @staticmethod\n def __serialize_authorization_rules_dict(authorization_rules: List[Dict]) -> None:\n for rule in authorization_rules:\n for k in list(rule['connectionInformation']['connectAs'].keys()):\n for pk in list(rule['connectionInformation']['connectAs'][k].keys()):\n item = rule['connectionInformation']['connectAs'][k][pk]\n del rule['connectionInformation']['connectAs'][k][pk]\n rule['connectionInformation']['connectAs'][k][serialize_dpa_vm_policies_protocol_type(pk)] = item\n item = rule['connectionInformation']['connectAs'][k]\n del rule['connectionInformation']['connectAs'][k]\n rule['connectionInformation']['connectAs'][serialize_dpa_vm_policies_workspace_type(k)] = item\n\n def add_policy(self, add_policy: ArkDPAVMAddPolicy) -> ArkDPAVMPolicy:\n \"\"\"\n Adds a new VM policy with the specified information.\n\n Args:\n add_policy (ArkDPVMAAddPolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPAVMPolicy: _description_\n \"\"\"\n self._logger.info(f'Adding new vm policy [{add_policy.policy_name}]')\n add_policy_dict = add_policy.dict(by_alias=True)\n add_policy_dict['providersData'] = self.__serialize_providers_dict(add_policy.providers_data)\n self.__serialize_authorization_rules_dict(add_policy_dict['userAccessRules'])\n resp: Response = self.__client.post(VM_POLICIES_API, json=add_policy_dict)\n if resp.status_code == HTTPStatus.CREATED:\n try:\n policy_id = resp.json()['policyId']\n return self.policy(ArkDPAGetPolicy(policy_id=policy_id))\n except (ValidationError, JSONDecodeError, KeyError) as ex:\n self._logger.exception(f'Failed to parse add vm policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse add vm policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to add vm policy [{resp.text}] - [{resp.status_code}]')\n\n def delete_policy(self, delete_policy: ArkDPADeletePolicy) -> None:\n \"\"\"\n Deletes the specified (ID or name) VM policy.\n\n Args:\n delete_policy (ArkDPADeletePolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n if delete_policy.policy_name and not delete_policy.policy_id:\n delete_policy.policy_id = self.__policy_id_by_name(delete_policy.policy_name)\n self._logger.info(f'Deleting vm policy [{delete_policy.policy_id}]')\n resp: Response = self.__client.delete(VM_POLICY_API.format(policy_id=delete_policy.policy_id))\n if resp.status_code != HTTPStatus.NO_CONTENT:\n raise ArkServiceException(f'Failed to delete vm policy [{resp.text}] - [{resp.status_code}]')\n\n def update_policy(self, update_policy: ArkDPAVMUpdatePolicy) -> ArkDPAVMPolicy:\n \"\"\"\n Updates a VM policy.\n\n Args:\n update_policy (ArkDPAVMUpdatePolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPAVMPolicy: _description_\n \"\"\"\n if update_policy.policy_name and not update_policy.policy_id:\n update_policy.policy_id = self.__policy_id_by_name(update_policy.policy_name)\n self._logger.info(f'Updating vm policy [{update_policy.policy_id}]')\n update_dict = json.loads(update_policy.json(by_alias=True, 
exclude_none=True, exclude={'new_policy_name', 'policy_name'}))\n if update_policy.new_policy_name:\n update_dict['policyName'] = update_policy.new_policy_name\n else:\n update_dict['policyName'] = update_policy.policy_name\n if update_policy.providers_data:\n update_dict['providersData'] = self.__serialize_providers_dict(update_policy.providers_data)\n if 'userAccessRules' in update_dict:\n self.__serialize_authorization_rules_dict(update_dict['userAccessRules'])\n resp: Response = self.__client.put(VM_POLICY_API.format(policy_id=update_policy.policy_id), json=update_dict)\n if resp.status_code == HTTPStatus.OK:\n try:\n return ArkDPAVMPolicy.parse_obj(resp.json())\n except (ValidationError, JSONDecodeError) as ex:\n self._logger.exception(f'Failed to parse update vm policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse update vm policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to update vm policy [{resp.text}] - [{resp.status_code}]')\n\n def update_policy_status(self, update_policy_status: ArkDPAUpdatePolicyStatus) -> ArkDPAVMPolicy:\n \"\"\"\n Updates the status of the specified (by ID) VM policy.\n\n Args:\n update_policy_status (ArkDPAUpdatePolicyStatus): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPAVMPolicy: _description_\n \"\"\"\n if update_policy_status.policy_name and not update_policy_status.policy_id:\n update_policy_status.policy_id = self.__policy_id_by_name(update_policy_status.policy_name)\n self._logger.info(f'Updating vm policy status [{update_policy_status.policy_id}]')\n resp: Response = self.__client.put(\n VM_UPDATE_POLICY_STATUS_API.format(policy_id=update_policy_status.policy_id),\n json=update_policy_status.dict(exclude={'policy_id'}),\n )\n if resp.status_code == HTTPStatus.OK:\n return self.policy(ArkDPAGetPolicy(policy_id=update_policy_status.policy_id))\n raise ArkServiceException(f'Failed to update vm policy status [{resp.text}] - [{resp.status_code}]')\n\n def list_policies(self) -> List[ArkDPAVMPolicyListItem]:\n \"\"\"\n Lists all of the tenants's VM policies.\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n List[ArkDPAVMPolicyListItem]: _description_\n \"\"\"\n self._logger.info('Retrieving all vm policies')\n resp: Response = self.__client.get(VM_POLICIES_API)\n if resp.status_code == HTTPStatus.OK:\n try:\n return parse_obj_as(List[ArkDPAVMPolicyListItem], resp.json()['items'])\n except (ValidationError, JSONDecodeError, KeyError) as ex:\n self._logger.exception(f'Failed to parse list vm policies response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse list vm policies response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to list vm policies [{resp.text}] - [{resp.status_code}]')\n\n def list_policies_by(self, policies_filter: ArkDPAVMPoliciesFilter) -> List[ArkDPAVMPolicyListItem]:\n \"\"\"\n Lists VM policies that match the specified filters.\n\n Args:\n policies_filter (ArkDPAVMPoliciesFilter): _description_\n\n Returns:\n List[ArkDPAVMPolicyListItem]: _description_\n \"\"\"\n self._logger.info(f'Retrieving vm policies by filter [{policies_filter}]')\n policies = self.list_policies()\n\n # Filter by statuses\n if policies_filter.statuses:\n policies = [p for p in policies if p.status in policies_filter.statuses]\n\n # Filter by name wildcard\n if policies_filter.name:\n policies = [p for p in policies if fnmatch(p.policy_name, policies_filter.name)]\n\n # Filter by cloud providers\n if 
policies_filter.providers:\n policies = [p for p in policies if all(cp.value in p.platforms for cp in policies_filter.providers)]\n\n return policies\n\n def policy(self, get_policy: ArkDPAGetPolicy) -> ArkDPAVMPolicy:\n \"\"\"\n Retrieves a VM policy by ID.\n\n Args:\n get_policy (ArkDPAGetPolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPAVMPolicy: _description_\n \"\"\"\n if get_policy.policy_name and not get_policy.policy_id:\n get_policy.policy_id = self.__policy_id_by_name(get_policy.policy_name)\n self._logger.info(f'Retrieving vm policy [{get_policy.policy_id}]')\n resp: Response = self.__client.get(VM_POLICY_API.format(policy_id=get_policy.policy_id))\n if resp.status_code == HTTPStatus.OK:\n try:\n return ArkDPAVMPolicy.parse_obj(resp.json())\n except (ValidationError, JSONDecodeError) as ex:\n self._logger.exception(f'Failed to parse vm policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse vm policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to retrieve vm policy [{get_policy.policy_id}] [{resp.text}] - [{resp.status_code}]')\n\n def policies_stats(self) -> ArkDPAVMPoliciesStats:\n \"\"\"\n Calculates VM policy statistics.\n\n Returns:\n ArkDPAVMPoliciesStats: _description_\n \"\"\"\n self._logger.info('Calculating vm policies stats')\n policies = self.list_policies()\n policies_stats = ArkDPAVMPoliciesStats.construct()\n policies_stats.policies_count = len(policies)\n\n # Count policies per status\n status_types: Set[ArkDPARuleStatus] = {p.status for p in policies if p.status}\n policies_stats.policies_count_per_status = {st: len([p for p in policies if p.status and p.status == st]) for st in status_types}\n\n # Count policies per platforms\n policies_stats.policies_count_per_provider = {}\n for policy in policies:\n for platform in policy.platforms:\n if platform not in policies_stats.policies_count_per_provider:\n policies_stats.policies_count_per_provider[platform] = 0\n policies_stats.policies_count_per_provider[platform] += 1\n\n return policies_stats\n\n @staticmethod\n @overrides\n def service_config() -> ArkServiceConfig:\n return SERVICE_CONFIG" } ]
from datetime import date, timedelta from typing import Dict, Final, List, Optional from overrides import overrides from ark_sdk_python.args.ark_args_formatter import ArkInquirerRender from ark_sdk_python.auth.ark_isp_auth import ArkISPAuth from ark_sdk_python.cli_services.dpa.common.ark_dpa_base_policies_editor_service import ArkDPABasePoliciesEditorService from ark_sdk_python.models.ark_profile import ArkProfile from ark_sdk_python.models.cli_services.dpa.policies_editor.vm import ArkDPAVMGeneratePolicy from ark_sdk_python.models.common import ArkProtocolType, ArkWorkspaceType from ark_sdk_python.models.services import ArkServiceConfig from ark_sdk_python.models.services.dpa.policies.common import ArkDPADeletePolicy, ArkDPAGetPolicy, ArkDPARuleStatus, ArkDPAUserData from ark_sdk_python.models.services.dpa.policies.vm import ( ArkDPAVMAddPolicy, ArkDPAVMAuthorizationRule, ArkDPAVMAWSProviderData, ArkDPAVMAzureProviderData, ArkDPAVMConnectionDataType, ArkDPAVMConnectionInformation, ArkDPAVMFQDNOperator, ArkDPAVMFQDNRule, ArkDPAVMFQDNRulesConjunction, ArkDPAVMGCPProviderData, ArkDPAVMLocalEphemeralUserConnectionMethodData, ArkDPAVMOnPremProviderData, ArkDPAVMPolicy, ArkDPAVMPolicyListItem, ArkDPAVMProvider, ArkDPAVMRDPLocalEphemeralUserConnectionData, ArkDPAVMUpdatePolicy, ) from ark_sdk_python.services.dpa.policies.vm.ark_dpa_vm_policies_service import ArkDPAVMPoliciesService import inquirer
token_num: 14,223
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-vm-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) DEFAULT_GENERATED_POLICY: Final[ArkDPAVMPolicy] = ArkDPAVMPolicy( policy_name='Default VM Policy', status=ArkDPARuleStatus.Draft, description='Auto generated vm policy', providers_data={}, start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPAVMAuthorizationRule] = ArkDPAVMAuthorizationRule( rule_name='Default VM Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPAVMConnectionInformation( connect_as={}, grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', ), ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPAVMProvider]] = { ArkWorkspaceType.AWS: ArkDPAVMAWSProviderData(regions=[], tags=[{'key': 'value'}], vpc_ids=[], account_ids=[]), ArkWorkspaceType.AZURE: ArkDPAVMAzureProviderData( regions=[], tags=[{'key': 'value'}], resource_groups=[], vnet_ids=[], subscriptions=[] ), ArkWorkspaceType.GCP: ArkDPAVMGCPProviderData(regions=[], tags=[{'key': 'value'}], network_ids=[], projects=[]), ArkWorkspaceType.ONPREM: ArkDPAVMOnPremProviderData(
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-vm-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) DEFAULT_GENERATED_POLICY: Final[ArkDPAVMPolicy] = ArkDPAVMPolicy( policy_name='Default VM Policy', status=ArkDPARuleStatus.Draft, description='Auto generated vm policy', providers_data={}, start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPAVMAuthorizationRule] = ArkDPAVMAuthorizationRule( rule_name='Default VM Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPAVMConnectionInformation( connect_as={}, grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', ), ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPAVMProvider]] = { ArkWorkspaceType.AWS: ArkDPAVMAWSProviderData(regions=[], tags=[{'key': 'value'}], vpc_ids=[], account_ids=[]), ArkWorkspaceType.AZURE: ArkDPAVMAzureProviderData( regions=[], tags=[{'key': 'value'}], resource_groups=[], vnet_ids=[], subscriptions=[] ), ArkWorkspaceType.GCP: ArkDPAVMGCPProviderData(regions=[], tags=[{'key': 'value'}], network_ids=[], projects=[]), ArkWorkspaceType.ONPREM: ArkDPAVMOnPremProviderData(
next_line: fqdn_rules_conjunction=ArkDPAVMFQDNRulesConjunction.OR,
gold_snippet_index: 18
created_at: 2023-11-13 09:24:31+00:00
level: 16k
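The example above suggests how the fields fit together: cropped_code ends exactly where next_line begins, context holds cross-file snippets, and gold_snippet_index points at the snippet relevant to the completion. The sketch below shows one plausible way to assemble a prompt and score a prediction; the prompt layout and the exact-match check are assumptions, not a format defined by the dataset.

def build_prompt(record: dict, max_context_items: int = 3) -> str:
    """Assemble a next-line completion prompt from one record (assumed layout)."""
    parts = []
    # Cross-file context: each item has "identifier", "path" and "snippet" keys.
    for item in record["context"][:max_context_items]:
        parts.append(f"# Path: {item['path']}\n{item['snippet']}")
    # In-file context: the imports followed by the code preceding the target line.
    parts.append(record["import_statement"])
    parts.append(record["cropped_code"])
    return "\n\n".join(parts)


def exact_match(prediction: str, record: dict) -> bool:
    """Compare the first generated line against the gold `next_line`."""
    lines = prediction.strip().splitlines()
    return bool(lines) and lines[0].strip() == record["next_line"].strip()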
repo_name: mohenghui/detectAuto_v8
file_path: ultralytics/models/sam/model.py
[ { "identifier": "Model", "path": "ultralytics/engine/model.py", "snippet": "class Model(nn.Module):\n \"\"\"\n A base class to unify APIs for all models.\n\n Args:\n model (str, Path): Path to the model file to load or create.\n task (Any, optional): Task type for the YOLO model. Defaults to None.\n\n Attributes:\n predictor (Any): The predictor object.\n model (Any): The model object.\n trainer (Any): The trainer object.\n task (str): The type of model task.\n ckpt (Any): The checkpoint object if the model loaded from *.pt file.\n cfg (str): The model configuration if loaded from *.yaml file.\n ckpt_path (str): The checkpoint file path.\n overrides (dict): Overrides for the trainer object.\n metrics (Any): The data for metrics.\n\n Methods:\n __call__(source=None, stream=False, **kwargs):\n Alias for the predict method.\n _new(cfg:str, verbose:bool=True) -> None:\n Initializes a new model and infers the task type from the model definitions.\n _load(weights:str, task:str='') -> None:\n Initializes a new model and infers the task type from the model head.\n _check_is_pytorch_model() -> None:\n Raises TypeError if the model is not a PyTorch model.\n reset() -> None:\n Resets the model modules.\n info(verbose:bool=False) -> None:\n Logs the model info.\n fuse() -> None:\n Fuses the model for faster inference.\n predict(source=None, stream=False, **kwargs) -> List[ultralytics.engine.results.Results]:\n Performs prediction using the YOLO model.\n\n Returns:\n list(ultralytics.engine.results.Results): The prediction results.\n \"\"\"\n\n def __init__(self, model: Union[str, Path] = 'yolov8n.pt', task=None) -> None:\n \"\"\"\n Initializes the YOLO model.\n\n Args:\n model (Union[str, Path], optional): Path or name of the model to load or create. Defaults to 'yolov8n.pt'.\n task (Any, optional): Task type for the YOLO model. Defaults to None.\n \"\"\"\n super().__init__()\n self.callbacks = callbacks.get_default_callbacks()\n self.predictor = None # reuse predictor\n self.model = None # model object\n self.trainer = None # trainer object\n self.ckpt = None # if loaded from *.pt\n self.cfg = None # if loaded from *.yaml\n self.ckpt_path = None\n self.overrides = {} # overrides for trainer object\n self.metrics = None # validation/training metrics\n self.session = None # HUB session\n self.task = task # task type\n model = str(model).strip() # strip spaces\n\n # Check if Ultralytics HUB model from https://hub.ultralytics.com\n if self.is_hub_model(model):\n from ultralytics.hub.session import HUBTrainingSession\n self.session = HUBTrainingSession(model)\n model = self.session.model_file\n\n # Check if Triton Server model\n elif self.is_triton_model(model):\n self.model = model\n self.task = task\n return\n\n # Load or create new YOLO model\n model = checks.check_model_file_from_stem(model) # add suffix, i.e. yolov8n -> yolov8n.pt\n if Path(model).suffix in ('.yaml', '.yml'):\n self._new(model, task)\n else:\n self._load(model, task)\n\n def __call__(self, source=None, stream=False, **kwargs):\n \"\"\"Calls the 'predict' function with given arguments to perform object detection.\"\"\"\n return self.predict(source, stream, **kwargs)\n\n @staticmethod\n def is_triton_model(model):\n \"\"\"Is model a Triton Server URL string, i.e. 
<scheme>://<netloc>/<endpoint>/<task_name>\"\"\"\n from urllib.parse import urlsplit\n url = urlsplit(model)\n return url.netloc and url.path and url.scheme in {'http', 'grfc'}\n\n @staticmethod\n def is_hub_model(model):\n \"\"\"Check if the provided model is a HUB model.\"\"\"\n return any((\n model.startswith(f'{HUB_WEB_ROOT}/models/'), # i.e. https://hub.ultralytics.com/models/MODEL_ID\n [len(x) for x in model.split('_')] == [42, 20], # APIKEY_MODELID\n len(model) == 20 and not Path(model).exists() and all(x not in model for x in './\\\\'))) # MODELID\n\n def _new(self, cfg: str, task=None, model=None, verbose=True):\n \"\"\"\n Initializes a new model and infers the task type from the model definitions.\n\n Args:\n cfg (str): model configuration file\n task (str | None): model task\n model (BaseModel): Customized model.\n verbose (bool): display model info on load\n \"\"\"\n cfg_dict = yaml_model_load(cfg)\n self.cfg = cfg\n self.task = task or guess_model_task(cfg_dict)\n self.model = (model or self._smart_load('model'))(cfg_dict, verbose=verbose and RANK == -1) # build model\n self.overrides['model'] = self.cfg\n self.overrides['task'] = self.task\n\n # Below added to allow export from YAMLs\n self.model.args = {**DEFAULT_CFG_DICT, **self.overrides} # combine default and model args (prefer model args)\n self.model.task = self.task\n\n def _load(self, weights: str, task=None):\n \"\"\"\n Initializes a new model and infers the task type from the model head.\n\n Args:\n weights (str): model checkpoint to be loaded\n task (str | None): model task\n \"\"\"\n suffix = Path(weights).suffix\n if suffix == '.pt':\n self.model, self.ckpt = attempt_load_one_weight(weights)\n self.task = self.model.args['task']\n self.overrides = self.model.args = self._reset_ckpt_args(self.model.args)\n self.ckpt_path = self.model.pt_path\n else:\n weights = checks.check_file(weights)\n self.model, self.ckpt = weights, None\n self.task = task or guess_model_task(weights)\n self.ckpt_path = weights\n self.overrides['model'] = weights\n self.overrides['task'] = self.task\n\n def _check_is_pytorch_model(self):\n \"\"\"Raises TypeError is model is not a PyTorch model.\"\"\"\n pt_str = isinstance(self.model, (str, Path)) and Path(self.model).suffix == '.pt'\n pt_module = isinstance(self.model, nn.Module)\n if not (pt_module or pt_str):\n raise TypeError(\n f\"model='{self.model}' should be a *.pt PyTorch model to run this method, but is a different format. \"\n f\"PyTorch models can train, val, predict and export, i.e. 'model.train(data=...)', but exported \"\n f\"formats like ONNX, TensorRT etc. only support 'predict' and 'val' modes, \"\n f\"i.e. 'yolo predict model=yolov8n.onnx'.\\nTo run CUDA or MPS inference please pass the device \"\n f\"argument directly in your inference command, i.e. 
'model.predict(source=..., device=0)'\")\n\n def reset_weights(self):\n \"\"\"Resets the model modules parameters to randomly initialized values, losing all training information.\"\"\"\n self._check_is_pytorch_model()\n for m in self.model.modules():\n if hasattr(m, 'reset_parameters'):\n m.reset_parameters()\n for p in self.model.parameters():\n p.requires_grad = True\n return self\n\n def load(self, weights='yolov8n.pt'):\n \"\"\"Transfers parameters with matching names and shapes from 'weights' to model.\"\"\"\n self._check_is_pytorch_model()\n if isinstance(weights, (str, Path)):\n weights, self.ckpt = attempt_load_one_weight(weights)\n self.model.load(weights)\n return self\n\n def info(self, detailed=False, verbose=True):\n \"\"\"\n Logs model info.\n\n Args:\n detailed (bool): Show detailed information about model.\n verbose (bool): Controls verbosity.\n \"\"\"\n self._check_is_pytorch_model()\n return self.model.info(detailed=detailed, verbose=verbose)\n\n def fuse(self):\n \"\"\"Fuse PyTorch Conv2d and BatchNorm2d layers.\"\"\"\n self._check_is_pytorch_model()\n self.model.fuse()\n\n def predict(self, source=None, stream=False, predictor=None, **kwargs):\n \"\"\"\n Perform prediction using the YOLO model.\n\n Args:\n source (str | int | PIL | np.ndarray): The source of the image to make predictions on.\n Accepts all source types accepted by the YOLO model.\n stream (bool): Whether to stream the predictions or not. Defaults to False.\n predictor (BasePredictor): Customized predictor.\n **kwargs : Additional keyword arguments passed to the predictor.\n Check the 'configuration' section in the documentation for all available options.\n\n Returns:\n (List[ultralytics.engine.results.Results]): The prediction results.\n \"\"\"\n if source is None:\n source = ASSETS\n LOGGER.warning(f\"WARNING ⚠️ 'source' is missing. Using 'source={source}'.\")\n\n is_cli = (sys.argv[0].endswith('yolo') or sys.argv[0].endswith('ultralytics')) and any(\n x in sys.argv for x in ('predict', 'track', 'mode=predict', 'mode=track'))\n\n custom = {'conf': 0.25, 'save': is_cli} # method defaults\n args = {**self.overrides, **custom, **kwargs, 'mode': 'predict'} # highest priority args on the right\n prompts = args.pop('prompts', None) # for SAM-type models\n\n if not self.predictor:\n self.predictor = (predictor or self._smart_load('predictor'))(overrides=args, _callbacks=self.callbacks)\n self.predictor.setup_model(model=self.model, verbose=is_cli)\n else: # only update args if predictor is already setup\n self.predictor.args = get_cfg(self.predictor.args, args)\n if 'project' in args or 'name' in args:\n self.predictor.save_dir = get_save_dir(self.predictor.args)\n if prompts and hasattr(self.predictor, 'set_prompts'): # for SAM-type models\n self.predictor.set_prompts(prompts)\n return self.predictor.predict_cli(source=source) if is_cli else self.predictor(source=source, stream=stream)\n\n def track(self, source=None, stream=False, persist=False, **kwargs):\n \"\"\"\n Perform object tracking on the input source using the registered trackers.\n\n Args:\n source (str, optional): The input source for object tracking. Can be a file path or a video stream.\n stream (bool, optional): Whether the input source is a video stream. Defaults to False.\n persist (bool, optional): Whether to persist the trackers if they already exist. 
Defaults to False.\n **kwargs (optional): Additional keyword arguments for the tracking process.\n\n Returns:\n (List[ultralytics.engine.results.Results]): The tracking results.\n \"\"\"\n if not hasattr(self.predictor, 'trackers'):\n from ultralytics.trackers import register_tracker\n register_tracker(self, persist)\n kwargs['conf'] = kwargs.get('conf') or 0.1 # ByteTrack-based method needs low confidence predictions as input\n kwargs['mode'] = 'track'\n return self.predict(source=source, stream=stream, **kwargs)\n\n def val(self, validator=None, **kwargs):\n \"\"\"\n Validate a model on a given dataset.\n\n Args:\n validator (BaseValidator): Customized validator.\n **kwargs : Any other args accepted by the validators. To see all args check 'configuration' section in docs\n \"\"\"\n custom = {'rect': True} # method defaults\n args = {**self.overrides, **custom, **kwargs, 'mode': 'val'} # highest priority args on the right\n\n validator = (validator or self._smart_load('validator'))(args=args, _callbacks=self.callbacks)\n validator(model=self.model)\n self.metrics = validator.metrics\n return validator.metrics\n\n def benchmark(self, **kwargs):\n \"\"\"\n Benchmark a model on all export formats.\n\n Args:\n **kwargs : Any other args accepted by the validators. To see all args check 'configuration' section in docs\n \"\"\"\n self._check_is_pytorch_model()\n from ultralytics.utils.benchmarks import benchmark\n\n custom = {'verbose': False} # method defaults\n args = {**DEFAULT_CFG_DICT, **self.model.args, **custom, **kwargs, 'mode': 'benchmark'}\n return benchmark(\n model=self,\n data=kwargs.get('data'), # if no 'data' argument passed set data=None for default datasets\n imgsz=args['imgsz'],\n half=args['half'],\n int8=args['int8'],\n device=args['device'],\n verbose=kwargs.get('verbose'))\n\n def export(self, **kwargs):\n \"\"\"\n Export model.\n\n Args:\n **kwargs : Any other args accepted by the Exporter. 
To see all args check 'configuration' section in docs.\n \"\"\"\n self._check_is_pytorch_model()\n from .exporter import Exporter\n\n custom = {'imgsz': self.model.args['imgsz'], 'batch': 1, 'data': None, 'verbose': False} # method defaults\n args = {**self.overrides, **custom, **kwargs, 'mode': 'export'} # highest priority args on the right\n return Exporter(overrides=args, _callbacks=self.callbacks)(model=self.model)\n\n def train(self, trainer=None, **kwargs):\n \"\"\"\n Trains the model on a given dataset.\n\n Args:\n trainer (BaseTrainer, optional): Customized trainer.\n **kwargs (Any): Any number of arguments representing the training configuration.\n \"\"\"\n self._check_is_pytorch_model()\n if self.session: # Ultralytics HUB session\n if any(kwargs):\n LOGGER.warning('WARNING ⚠️ using HUB training arguments, ignoring local training arguments.')\n kwargs = self.session.train_args\n checks.check_pip_update_available()\n\n overrides = yaml_load(checks.check_yaml(kwargs['cfg'])) if kwargs.get('cfg') else self.overrides\n custom = {'data': TASK2DATA[self.task]} # method defaults\n args = {**overrides, **custom, **kwargs, 'mode': 'train'} # highest priority args on the right\n if args.get('resume'):\n args['resume'] = self.ckpt_path\n\n self.trainer = (trainer or self._smart_load('trainer'))(overrides=args, _callbacks=self.callbacks)\n if not args.get('resume'): # manually set model only if not resuming\n self.trainer.model = self.trainer.get_model(weights=self.model if self.ckpt else None, cfg=self.model.yaml)\n self.model = self.trainer.model\n self.trainer.hub_session = self.session # attach optional HUB session\n self.trainer.train()\n # Update model and cfg after training\n if RANK in (-1, 0):\n ckpt = self.trainer.best if self.trainer.best.exists() else self.trainer.last\n self.model, _ = attempt_load_one_weight(ckpt)\n self.overrides = self.model.args\n self.metrics = getattr(self.trainer.validator, 'metrics', None) # TODO: no metrics returned by DDP\n return self.metrics\n\n def tune(self, use_ray=False, iterations=10, *args, **kwargs):\n \"\"\"\n Runs hyperparameter tuning, optionally using Ray Tune. See ultralytics.utils.tuner.run_ray_tune for Args.\n\n Returns:\n (dict): A dictionary containing the results of the hyperparameter search.\n \"\"\"\n self._check_is_pytorch_model()\n if use_ray:\n from ultralytics.utils.tuner import run_ray_tune\n return run_ray_tune(self, max_samples=iterations, *args, **kwargs)\n else:\n from .tuner import Tuner\n\n custom = {} # method defaults\n args = {**self.overrides, **custom, **kwargs, 'mode': 'train'} # highest priority args on the right\n return Tuner(args=args, _callbacks=self.callbacks)(model=self, iterations=iterations)\n\n def _apply(self, fn):\n \"\"\"Apply to(), cpu(), cuda(), half(), float() to model tensors that are not parameters or registered buffers.\"\"\"\n self._check_is_pytorch_model()\n self = super()._apply(fn) # noqa\n self.predictor = None # reset predictor as device may have changed\n self.overrides['device'] = self.device # was str(self.device) i.e. 
device(type='cuda', index=0) -> 'cuda:0'\n return self\n\n @property\n def names(self):\n \"\"\"Returns class names of the loaded model.\"\"\"\n return self.model.names if hasattr(self.model, 'names') else None\n\n @property\n def device(self):\n \"\"\"Returns device if PyTorch model.\"\"\"\n return next(self.model.parameters()).device if isinstance(self.model, nn.Module) else None\n\n @property\n def transforms(self):\n \"\"\"Returns transform of the loaded model.\"\"\"\n return self.model.transforms if hasattr(self.model, 'transforms') else None\n\n def add_callback(self, event: str, func):\n \"\"\"Add a callback.\"\"\"\n self.callbacks[event].append(func)\n\n def clear_callback(self, event: str):\n \"\"\"Clear all event callbacks.\"\"\"\n self.callbacks[event] = []\n\n def reset_callbacks(self):\n \"\"\"Reset all registered callbacks.\"\"\"\n for event in callbacks.default_callbacks.keys():\n self.callbacks[event] = [callbacks.default_callbacks[event][0]]\n\n @staticmethod\n def _reset_ckpt_args(args):\n \"\"\"Reset arguments when loading a PyTorch model.\"\"\"\n include = {'imgsz', 'data', 'task', 'single_cls'} # only remember these arguments when loading a PyTorch model\n return {k: v for k, v in args.items() if k in include}\n\n # def __getattr__(self, attr):\n # \"\"\"Raises error if object has no requested attribute.\"\"\"\n # name = self.__class__.__name__\n # raise AttributeError(f\"'{name}' object has no attribute '{attr}'. See valid attributes below.\\n{self.__doc__}\")\n\n def _smart_load(self, key):\n \"\"\"Load model/trainer/validator/predictor.\"\"\"\n try:\n return self.task_map[self.task][key]\n except Exception as e:\n name = self.__class__.__name__\n mode = inspect.stack()[1][3] # get the function name.\n raise NotImplementedError(\n emojis(f\"WARNING ⚠️ '{name}' model does not support '{mode}' mode for '{self.task}' task yet.\")) from e\n\n @property\n def task_map(self):\n \"\"\"\n Map head to model, trainer, validator, and predictor classes.\n\n Returns:\n task_map (dict): The map of model task to mode classes.\n \"\"\"\n raise NotImplementedError('Please provide task map for your model!')" }, { "identifier": "model_info", "path": "ultralytics/utils/torch_utils.py", "snippet": "def model_info(model, detailed=False, verbose=True, imgsz=640):\n \"\"\"\n Model information.\n\n imgsz may be int or list, i.e. 
imgsz=640 or imgsz=[640, 320].\n \"\"\"\n if not verbose:\n return\n n_p = get_num_params(model) # number of parameters\n n_g = get_num_gradients(model) # number of gradients\n n_l = len(list(model.modules())) # number of layers\n if detailed:\n LOGGER.info(\n f\"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}\")\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n LOGGER.info('%5g %40s %9s %12g %20s %10.3g %10.3g %10s' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std(), p.dtype))\n\n flops = get_flops(model, imgsz)\n fused = ' (fused)' if getattr(model, 'is_fused', lambda: False)() else ''\n fs = f', {flops:.1f} GFLOPs' if flops else ''\n yaml_file = getattr(model, 'yaml_file', '') or getattr(model, 'yaml', {}).get('yaml_file', '')\n model_name = Path(yaml_file).stem.replace('yolo', 'YOLO') or 'Model'\n LOGGER.info(f'{model_name} summary{fused}: {n_l} layers, {n_p} parameters, {n_g} gradients{fs}')\n return n_l, n_p, n_g, flops" }, { "identifier": "build_sam", "path": "ultralytics/models/sam/build.py", "snippet": "def build_sam(ckpt='sam_b.pt'):\n \"\"\"Build a SAM model specified by ckpt.\"\"\"\n model_builder = None\n ckpt = str(ckpt) # to allow Path ckpt types\n for k in sam_model_map.keys():\n if ckpt.endswith(k):\n model_builder = sam_model_map.get(k)\n\n if not model_builder:\n raise FileNotFoundError(f'{ckpt} is not a supported SAM model. Available models are: \\n {sam_model_map.keys()}')\n\n return model_builder(ckpt)" }, { "identifier": "Predictor", "path": "ultralytics/models/sam/predict.py", "snippet": "class Predictor(BasePredictor):\n \"\"\"\n Predictor class for the Segment Anything Model (SAM), extending BasePredictor.\n\n The class provides an interface for model inference tailored to image segmentation tasks.\n With advanced architecture and promptable segmentation capabilities, it facilitates flexible and real-time\n mask generation. The class is capable of working with various types of prompts such as bounding boxes,\n points, and low-resolution masks.\n\n Attributes:\n cfg (dict): Configuration dictionary specifying model and task-related parameters.\n overrides (dict): Dictionary containing values that override the default configuration.\n _callbacks (dict): Dictionary of user-defined callback functions to augment behavior.\n args (namespace): Namespace to hold command-line arguments or other operational variables.\n im (torch.Tensor): Preprocessed input image tensor.\n features (torch.Tensor): Extracted image features used for inference.\n prompts (dict): Collection of various prompt types, such as bounding boxes and points.\n segment_all (bool): Flag to control whether to segment all objects in the image or only specified ones.\n \"\"\"\n\n def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):\n \"\"\"\n Initialize the Predictor with configuration, overrides, and callbacks.\n\n The method sets up the Predictor object and applies any configuration overrides or callbacks provided. 
It\n initializes task-specific settings for SAM, such as retina_masks being set to True for optimal results.\n\n Args:\n cfg (dict): Configuration dictionary.\n overrides (dict, optional): Dictionary of values to override default configuration.\n _callbacks (dict, optional): Dictionary of callback functions to customize behavior.\n \"\"\"\n if overrides is None:\n overrides = {}\n overrides.update(dict(task='segment', mode='predict', imgsz=1024))\n super().__init__(cfg, overrides, _callbacks)\n self.args.retina_masks = True\n self.im = None\n self.features = None\n self.prompts = {}\n self.segment_all = False\n\n def preprocess(self, im):\n \"\"\"\n Preprocess the input image for model inference.\n\n The method prepares the input image by applying transformations and normalization.\n It supports both torch.Tensor and list of np.ndarray as input formats.\n\n Args:\n im (torch.Tensor | List[np.ndarray]): BCHW tensor format or list of HWC numpy arrays.\n\n Returns:\n (torch.Tensor): The preprocessed image tensor.\n \"\"\"\n if self.im is not None:\n return self.im\n not_tensor = not isinstance(im, torch.Tensor)\n if not_tensor:\n im = np.stack(self.pre_transform(im))\n im = im[..., ::-1].transpose((0, 3, 1, 2))\n im = np.ascontiguousarray(im)\n im = torch.from_numpy(im)\n\n im = im.to(self.device)\n im = im.half() if self.model.fp16 else im.float()\n if not_tensor:\n im = (im - self.mean) / self.std\n return im\n\n def pre_transform(self, im):\n \"\"\"\n Perform initial transformations on the input image for preprocessing.\n\n The method applies transformations such as resizing to prepare the image for further preprocessing.\n Currently, batched inference is not supported; hence the list length should be 1.\n\n Args:\n im (List[np.ndarray]): List containing images in HWC numpy array format.\n\n Returns:\n (List[np.ndarray]): List of transformed images.\n \"\"\"\n assert len(im) == 1, 'SAM model does not currently support batched inference'\n letterbox = LetterBox(self.args.imgsz, auto=False, center=False)\n return [letterbox(image=x) for x in im]\n\n def inference(self, im, bboxes=None, points=None, labels=None, masks=None, multimask_output=False, *args, **kwargs):\n \"\"\"\n Perform image segmentation inference based on the given input cues, using the currently loaded image. This\n method leverages SAM's (Segment Anything Model) architecture consisting of image encoder, prompt encoder, and\n mask decoder for real-time and promptable segmentation tasks.\n\n Args:\n im (torch.Tensor): The preprocessed input image in tensor format, with shape (N, C, H, W).\n bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format.\n points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixel coordinates.\n labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 for foreground and 0 for background.\n masks (np.ndarray, optional): Low-resolution masks from previous predictions. Shape should be (N, H, W). For SAM, H=W=256.\n multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. 
Defaults to False.\n\n Returns:\n (tuple): Contains the following three elements.\n - np.ndarray: The output masks in shape CxHxW, where C is the number of generated masks.\n - np.ndarray: An array of length C containing quality scores predicted by the model for each mask.\n - np.ndarray: Low-resolution logits of shape CxHxW for subsequent inference, where H=W=256.\n \"\"\"\n # Override prompts if any stored in self.prompts\n bboxes = self.prompts.pop('bboxes', bboxes)\n points = self.prompts.pop('points', points)\n masks = self.prompts.pop('masks', masks)\n\n if all(i is None for i in [bboxes, points, masks]):\n return self.generate(im, *args, **kwargs)\n\n return self.prompt_inference(im, bboxes, points, labels, masks, multimask_output)\n\n def prompt_inference(self, im, bboxes=None, points=None, labels=None, masks=None, multimask_output=False):\n \"\"\"\n Internal function for image segmentation inference based on cues like bounding boxes, points, and masks.\n Leverages SAM's specialized architecture for prompt-based, real-time segmentation.\n\n Args:\n im (torch.Tensor): The preprocessed input image in tensor format, with shape (N, C, H, W).\n bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format.\n points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixel coordinates.\n labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 for foreground and 0 for background.\n masks (np.ndarray, optional): Low-resolution masks from previous predictions. Shape should be (N, H, W). For SAM, H=W=256.\n multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. Defaults to False.\n\n Returns:\n (tuple): Contains the following three elements.\n - np.ndarray: The output masks in shape CxHxW, where C is the number of generated masks.\n - np.ndarray: An array of length C containing quality scores predicted by the model for each mask.\n - np.ndarray: Low-resolution logits of shape CxHxW for subsequent inference, where H=W=256.\n \"\"\"\n features = self.model.image_encoder(im) if self.features is None else self.features\n\n src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:]\n r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])\n # Transform input prompts\n if points is not None:\n points = torch.as_tensor(points, dtype=torch.float32, device=self.device)\n points = points[None] if points.ndim == 1 else points\n # Assuming labels are all positive if users don't pass labels.\n if labels is None:\n labels = np.ones(points.shape[0])\n labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)\n points *= r\n # (N, 2) --> (N, 1, 2), (N, ) --> (N, 1)\n points, labels = points[:, None, :], labels[:, None]\n if bboxes is not None:\n bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device)\n bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes\n bboxes *= r\n if masks is not None:\n masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)\n\n points = (points, labels) if points is not None else None\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks)\n\n # Predict masks\n pred_masks, pred_scores = self.model.mask_decoder(\n image_embeddings=features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n 
multimask_output=multimask_output,\n )\n\n # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )\n # `d` could be 1 or 3 depends on `multimask_output`.\n return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)\n\n def generate(self,\n im,\n crop_n_layers=0,\n crop_overlap_ratio=512 / 1500,\n crop_downscale_factor=1,\n point_grids=None,\n points_stride=32,\n points_batch_size=64,\n conf_thres=0.88,\n stability_score_thresh=0.95,\n stability_score_offset=0.95,\n crop_nms_thresh=0.7):\n \"\"\"\n Perform image segmentation using the Segment Anything Model (SAM).\n\n This function segments an entire image into constituent parts by leveraging SAM's advanced architecture\n and real-time performance capabilities. It can optionally work on image crops for finer segmentation.\n\n Args:\n im (torch.Tensor): Input tensor representing the preprocessed image with dimensions (N, C, H, W).\n crop_n_layers (int): Specifies the number of layers for additional mask predictions on image crops.\n Each layer produces 2**i_layer number of image crops.\n crop_overlap_ratio (float): Determines the extent of overlap between crops. Scaled down in subsequent layers.\n crop_downscale_factor (int): Scaling factor for the number of sampled points-per-side in each layer.\n point_grids (list[np.ndarray], optional): Custom grids for point sampling normalized to [0,1].\n Used in the nth crop layer.\n points_stride (int, optional): Number of points to sample along each side of the image.\n Exclusive with 'point_grids'.\n points_batch_size (int): Batch size for the number of points processed simultaneously.\n conf_thres (float): Confidence threshold [0,1] for filtering based on the model's mask quality prediction.\n stability_score_thresh (float): Stability threshold [0,1] for mask filtering based on mask stability.\n stability_score_offset (float): Offset value for calculating stability score.\n crop_nms_thresh (float): IoU cutoff for Non-Maximum Suppression (NMS) to remove duplicate masks between crops.\n\n Returns:\n (tuple): A tuple containing segmented masks, confidence scores, and bounding boxes.\n \"\"\"\n self.segment_all = True\n ih, iw = im.shape[2:]\n crop_regions, layer_idxs = generate_crop_boxes((ih, iw), crop_n_layers, crop_overlap_ratio)\n if point_grids is None:\n point_grids = build_all_layer_point_grids(points_stride, crop_n_layers, crop_downscale_factor)\n pred_masks, pred_scores, pred_bboxes, region_areas = [], [], [], []\n for crop_region, layer_idx in zip(crop_regions, layer_idxs):\n x1, y1, x2, y2 = crop_region\n w, h = x2 - x1, y2 - y1\n area = torch.tensor(w * h, device=im.device)\n points_scale = np.array([[w, h]]) # w, h\n # Crop image and interpolate to input size\n crop_im = F.interpolate(im[..., y1:y2, x1:x2], (ih, iw), mode='bilinear', align_corners=False)\n # (num_points, 2)\n points_for_image = point_grids[layer_idx] * points_scale\n crop_masks, crop_scores, crop_bboxes = [], [], []\n for (points, ) in batch_iterator(points_batch_size, points_for_image):\n pred_mask, pred_score = self.prompt_inference(crop_im, points=points, multimask_output=True)\n # Interpolate predicted masks to input size\n pred_mask = F.interpolate(pred_mask[None], (h, w), mode='bilinear', align_corners=False)[0]\n idx = pred_score > conf_thres\n pred_mask, pred_score = pred_mask[idx], pred_score[idx]\n\n stability_score = calculate_stability_score(pred_mask, self.model.mask_threshold,\n stability_score_offset)\n idx = stability_score > stability_score_thresh\n pred_mask, pred_score = pred_mask[idx], pred_score[idx]\n # 
Bool type is much more memory-efficient.\n pred_mask = pred_mask > self.model.mask_threshold\n # (N, 4)\n pred_bbox = batched_mask_to_box(pred_mask).float()\n keep_mask = ~is_box_near_crop_edge(pred_bbox, crop_region, [0, 0, iw, ih])\n if not torch.all(keep_mask):\n pred_bbox, pred_mask, pred_score = pred_bbox[keep_mask], pred_mask[keep_mask], pred_score[keep_mask]\n\n crop_masks.append(pred_mask)\n crop_bboxes.append(pred_bbox)\n crop_scores.append(pred_score)\n\n # Do nms within this crop\n crop_masks = torch.cat(crop_masks)\n crop_bboxes = torch.cat(crop_bboxes)\n crop_scores = torch.cat(crop_scores)\n keep = torchvision.ops.nms(crop_bboxes, crop_scores, self.args.iou) # NMS\n crop_bboxes = uncrop_boxes_xyxy(crop_bboxes[keep], crop_region)\n crop_masks = uncrop_masks(crop_masks[keep], crop_region, ih, iw)\n crop_scores = crop_scores[keep]\n\n pred_masks.append(crop_masks)\n pred_bboxes.append(crop_bboxes)\n pred_scores.append(crop_scores)\n region_areas.append(area.expand(len(crop_masks)))\n\n pred_masks = torch.cat(pred_masks)\n pred_bboxes = torch.cat(pred_bboxes)\n pred_scores = torch.cat(pred_scores)\n region_areas = torch.cat(region_areas)\n\n # Remove duplicate masks between crops\n if len(crop_regions) > 1:\n scores = 1 / region_areas\n keep = torchvision.ops.nms(pred_bboxes, scores, crop_nms_thresh)\n pred_masks, pred_bboxes, pred_scores = pred_masks[keep], pred_bboxes[keep], pred_scores[keep]\n\n return pred_masks, pred_scores, pred_bboxes\n\n def setup_model(self, model, verbose=True):\n \"\"\"\n Initializes the Segment Anything Model (SAM) for inference.\n\n This method sets up the SAM model by allocating it to the appropriate device and initializing the necessary\n parameters for image normalization and other Ultralytics compatibility settings.\n\n Args:\n model (torch.nn.Module): A pre-trained SAM model. If None, a model will be built based on configuration.\n verbose (bool): If True, prints selected device information.\n\n Attributes:\n model (torch.nn.Module): The SAM model allocated to the chosen device for inference.\n device (torch.device): The device to which the model and tensors are allocated.\n mean (torch.Tensor): The mean values for image normalization.\n std (torch.Tensor): The standard deviation values for image normalization.\n \"\"\"\n device = select_device(self.args.device, verbose=verbose)\n if model is None:\n model = build_sam(self.args.model)\n model.eval()\n self.model = model.to(device)\n self.device = device\n self.mean = torch.tensor([123.675, 116.28, 103.53]).view(-1, 1, 1).to(device)\n self.std = torch.tensor([58.395, 57.12, 57.375]).view(-1, 1, 1).to(device)\n\n # Ultralytics compatibility settings\n self.model.pt = False\n self.model.triton = False\n self.model.stride = 32\n self.model.fp16 = False\n self.done_warmup = True\n\n def postprocess(self, preds, img, orig_imgs):\n \"\"\"\n Post-processes SAM's inference outputs to generate object detection masks and bounding boxes.\n\n The method scales masks and boxes to the original image size and applies a threshold to the mask predictions. 
The\n SAM model uses advanced architecture and promptable segmentation tasks to achieve real-time performance.\n\n Args:\n preds (tuple): The output from SAM model inference, containing masks, scores, and optional bounding boxes.\n img (torch.Tensor): The processed input image tensor.\n orig_imgs (list | torch.Tensor): The original, unprocessed images.\n\n Returns:\n (list): List of Results objects containing detection masks, bounding boxes, and other metadata.\n \"\"\"\n # (N, 1, H, W), (N, 1)\n pred_masks, pred_scores = preds[:2]\n pred_bboxes = preds[2] if self.segment_all else None\n names = dict(enumerate(str(i) for i in range(len(pred_masks))))\n\n if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list\n orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)\n\n results = []\n for i, masks in enumerate([pred_masks]):\n orig_img = orig_imgs[i]\n if pred_bboxes is not None:\n pred_bboxes = ops.scale_boxes(img.shape[2:], pred_bboxes.float(), orig_img.shape, padding=False)\n cls = torch.arange(len(pred_masks), dtype=torch.int32, device=pred_masks.device)\n pred_bboxes = torch.cat([pred_bboxes, pred_scores[:, None], cls[:, None]], dim=-1)\n\n masks = ops.scale_masks(masks[None].float(), orig_img.shape[:2], padding=False)[0]\n masks = masks > self.model.mask_threshold # to bool\n img_path = self.batch[0][i]\n results.append(Results(orig_img, path=img_path, names=names, masks=masks, boxes=pred_bboxes))\n # Reset segment-all mode.\n self.segment_all = False\n return results\n\n def setup_source(self, source):\n \"\"\"\n Sets up the data source for inference.\n\n This method configures the data source from which images will be fetched for inference. The source could be a\n directory, a video file, or other types of image data sources.\n\n Args:\n source (str | Path): The path to the image data source for inference.\n \"\"\"\n if source is not None:\n super().setup_source(source)\n\n def set_image(self, image):\n \"\"\"\n Preprocesses and sets a single image for inference.\n\n This function sets up the model if not already initialized, configures the data source to the specified image,\n and preprocesses the image for feature extraction. Only one image can be set at a time.\n\n Args:\n image (str | np.ndarray): Image file path as a string, or a np.ndarray image read by cv2.\n\n Raises:\n AssertionError: If more than one image is set.\n \"\"\"\n if self.model is None:\n model = build_sam(self.args.model)\n self.setup_model(model)\n self.setup_source(image)\n assert len(self.dataset) == 1, '`set_image` only supports setting one image!'\n for batch in self.dataset:\n im = self.preprocess(batch[1])\n self.features = self.model.image_encoder(im)\n self.im = im\n break\n\n def set_prompts(self, prompts):\n \"\"\"Set prompts in advance.\"\"\"\n self.prompts = prompts\n\n def reset_image(self):\n \"\"\"Resets the image and its features to None.\"\"\"\n self.im = None\n self.features = None\n\n @staticmethod\n def remove_small_regions(masks, min_area=0, nms_thresh=0.7):\n \"\"\"\n Perform post-processing on segmentation masks generated by the Segment Anything Model (SAM). Specifically, this\n function removes small disconnected regions and holes from the input masks, and then performs Non-Maximum\n Suppression (NMS) to eliminate any newly created duplicate boxes.\n\n Args:\n masks (torch.Tensor): A tensor containing the masks to be processed. 
Shape should be (N, H, W), where N is\n the number of masks, H is height, and W is width.\n min_area (int): The minimum area below which disconnected regions and holes will be removed. Defaults to 0.\n nms_thresh (float): The IoU threshold for the NMS algorithm. Defaults to 0.7.\n\n Returns:\n (tuple([torch.Tensor, List[int]])):\n - new_masks (torch.Tensor): The processed masks with small regions removed. Shape is (N, H, W).\n - keep (List[int]): The indices of the remaining masks post-NMS, which can be used to filter the boxes.\n \"\"\"\n if len(masks) == 0:\n return masks\n\n # Filter small disconnected regions and holes\n new_masks = []\n scores = []\n for mask in masks:\n mask = mask.cpu().numpy().astype(np.uint8)\n mask, changed = remove_small_regions(mask, min_area, mode='holes')\n unchanged = not changed\n mask, changed = remove_small_regions(mask, min_area, mode='islands')\n unchanged = unchanged and not changed\n\n new_masks.append(torch.as_tensor(mask).unsqueeze(0))\n # Give score=0 to changed masks and 1 to unchanged masks so NMS prefers masks not needing postprocessing\n scores.append(float(unchanged))\n\n # Recalculate boxes and remove any new duplicates\n new_masks = torch.cat(new_masks, dim=0)\n boxes = batched_mask_to_box(new_masks)\n keep = torchvision.ops.nms(boxes.float(), torch.as_tensor(scores), nms_thresh)\n\n return new_masks[keep].to(device=masks.device, dtype=masks.dtype), keep" } ]
from pathlib import Path from ultralytics.engine.model import Model from ultralytics.utils.torch_utils import model_info from .build import build_sam from .predict import Predictor
10,990
# Ultralytics YOLO 🚀, AGPL-3.0 license """ SAM model interface. This module provides an interface to the Segment Anything Model (SAM) from Ultralytics, designed for real-time image segmentation tasks. The SAM model allows for promptable segmentation with unparalleled versatility in image analysis, and has been trained on the SA-1B dataset. It features zero-shot performance capabilities, enabling it to adapt to new image distributions and tasks without prior knowledge. Key Features: - Promptable segmentation - Real-time performance - Zero-shot transfer capabilities - Trained on SA-1B dataset """ class SAM(Model): """ SAM (Segment Anything Model) interface class. SAM is designed for promptable real-time image segmentation. It can be used with a variety of prompts such as bounding boxes, points, or labels. The model has capabilities for zero-shot performance and is trained on the SA-1B dataset. """ def __init__(self, model='sam_b.pt') -> None: """ Initializes the SAM model with a pre-trained model file. Args: model (str): Path to the pre-trained SAM model file. File should have a .pt or .pth extension. Raises: NotImplementedError: If the model file extension is not .pt or .pth. """ if model and Path(model).suffix not in ('.pt', '.pth'): raise NotImplementedError('SAM prediction requires pre-trained *.pt or *.pth model.') super().__init__(model=model, task='segment') def _load(self, weights: str, task=None): """ Loads the specified weights into the SAM model. Args: weights (str): Path to the weights file. task (str, optional): Task name. Defaults to None. """
# Ultralytics YOLO 🚀, AGPL-3.0 license """ SAM model interface. This module provides an interface to the Segment Anything Model (SAM) from Ultralytics, designed for real-time image segmentation tasks. The SAM model allows for promptable segmentation with unparalleled versatility in image analysis, and has been trained on the SA-1B dataset. It features zero-shot performance capabilities, enabling it to adapt to new image distributions and tasks without prior knowledge. Key Features: - Promptable segmentation - Real-time performance - Zero-shot transfer capabilities - Trained on SA-1B dataset """ class SAM(Model): """ SAM (Segment Anything Model) interface class. SAM is designed for promptable real-time image segmentation. It can be used with a variety of prompts such as bounding boxes, points, or labels. The model has capabilities for zero-shot performance and is trained on the SA-1B dataset. """ def __init__(self, model='sam_b.pt') -> None: """ Initializes the SAM model with a pre-trained model file. Args: model (str): Path to the pre-trained SAM model file. File should have a .pt or .pth extension. Raises: NotImplementedError: If the model file extension is not .pt or .pth. """ if model and Path(model).suffix not in ('.pt', '.pth'): raise NotImplementedError('SAM prediction requires pre-trained *.pt or *.pth model.') super().__init__(model=model, task='segment') def _load(self, weights: str, task=None): """ Loads the specified weights into the SAM model. Args: weights (str): Path to the weights file. task (str, optional): Task name. Defaults to None. """
self.model = build_sam(weights)
2
2023-11-16 12:49:59+00:00
16k
Aues6uen11Z/Zafkiel
tests/test.py
[ { "identifier": "logger", "path": "zafkiel/logger.py", "snippet": "" }, { "identifier": "Config", "path": "zafkiel/config.py", "snippet": "class Config:\n ST = Settings\n ST.CVSTRATEGY = [\"mstpl\", \"sift\"]\n ST.THRESHOLD = 0.8\n\n GAME_PATH = None\n SERVER_LANG = 'cn'\n\n # Top, left and bottom boundary pixel values when running in a bordered program\n # The value on my Win10 computer, may not accurate for everyone.\n BORDER = (32, 3, 2)" }, { "identifier": "API", "path": "zafkiel/device/api.py", "snippet": "class API:\n \"\"\"\n Device Setup APIs\n \"\"\"\n\n @staticmethod\n def init_device(platform=\"Android\", uuid=None, **kwargs):\n return init_device(platform, uuid, **kwargs)\n\n @staticmethod\n def connect_device(uri):\n return connect_device(uri)\n\n @staticmethod\n def device():\n return device()\n\n @staticmethod\n def set_current(idx):\n set_current(idx)\n\n @staticmethod\n def auto_setup(\n basedir: str = None,\n devices: list = None,\n firing_time: int = 30,\n logdir: bool = None,\n project_root: str = None,\n compress: int = None\n ):\n \"\"\"\n Auto setup running env and try to connect device if no device is connected.\n\n Args:\n basedir: basedir of script, __file__ is also acceptable.\n devices: connect_device uri in list.\n firing_time: Game starts taking time, this value should be set larger in old machine.\n logdir: log dir for script report, default is None for no log, set to ``True`` for ``<basedir>/log``.\n project_root: Project root dir for `using` api.\n compress: The compression rate of the screenshot image, integer in range [1, 99], default is 10\n\n Examples:\n auto_setup(__file__)\n auto_setup(__file__, devices=[\"Android://127.0.0.1:5037/SJE5T17B17\"],\n ... logdir=True, project_root=r\"D:\\\\test\\\\logs\", compress=90)\n \"\"\"\n if basedir:\n if os.path.isfile(basedir):\n basedir = os.path.dirname(basedir)\n if basedir not in G.BASEDIR:\n G.BASEDIR.append(basedir)\n if devices:\n startup_time = Timer(firing_time).start()\n for dev in devices:\n while not startup_time.reached():\n try:\n connect_device(dev)\n break\n except ElementNotFoundError:\n time.sleep(3)\n if startup_time.reached():\n raise NotRunningError(dev)\n if logdir:\n logdir = script_log_dir(basedir, logdir)\n set_logdir(logdir)\n if project_root:\n ST.PROJECT_ROOT = project_root\n if compress:\n ST.SNAPSHOT_QUALITY = compress\n\n \"\"\"\n Device Operations\n \"\"\"\n\n @staticmethod\n def app_is_running() -> bool:\n \"\"\"\n Platforms:\n Windows\n\n Returns:\n Whether app is running\n \"\"\"\n return G.DEVICE.app_is_running()\n\n @staticmethod\n def stop_app(package=None):\n \"\"\"\n Stop the target application on device\n\n Return:\n Has the Windows application stopped, on Android and iOS no return.\n\n Platforms:\n Android, iOS, Windows\n\n Example:\n stop_app(\"com.netease.cloudmusic\")\n stop_app() # only test on Windows\n \"\"\"\n return G.DEVICE.stop_app(package)\n\n @staticmethod\n @logwrap\n def touch(\n v: Template or tuple,\n times: int = 1,\n blind: bool = False,\n interval: float = 0.05,\n ocr_mode: int = 0,\n cls: Type[Ocr] = Ocr,\n **kwargs\n ) -> tuple:\n \"\"\"\n Perform the touch action on the device screen\n\n Args:\n v: Target to touch, either a ``ImageTemplate`` instance or absolute coordinates (x, y).\n times: How many touches to be performed\n blind: Whether to recognize Template, sometimes we only need to click without caring about the image.\n interval: Time interval between two touches.\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, 
`OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n **kwargs: Platform specific `kwargs`, please refer to corresponding docs.\n\n Returns:\n Final position to be clicked, e.g. (100, 100)\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n Click absolute coordinates:\n touch((100, 100))\n Click 2 times:\n touch((100, 100), times=2)\n Under Android and Windows platforms, you can set the click duration:\n touch((100, 100), duration=2)\n Right click(Windows):\n touch((100, 100), right_click=True)\n \"\"\"\n if isinstance(v, Template):\n if blind:\n center_pos = (v.area[2] + v.area[0]) / 2, (v.area[3] + v.area[1]) / 2\n else:\n center_pos = loop_find(v, timeout=ST.FIND_TIMEOUT, cls=cls, ocr_mode=ocr_mode)\n\n h = v.height * v.ratio()\n w = v.width * v.ratio() # actual height and width of target in screen\n pos = random_rectangle_point(center_pos, h, w)\n else:\n try_log_screen()\n pos = v\n for _ in range(times):\n G.DEVICE.touch(pos, **kwargs)\n time.sleep(interval)\n delay_after_operation()\n return pos\n\n @logwrap\n def find_click(\n self,\n rec_template: Template,\n touch_template: Template = None,\n times: int = 1,\n timeout: float = 1,\n blind: bool = False,\n ocr_mode: int = 0,\n cls: Type[Ocr] = Ocr\n ) -> bool:\n \"\"\"\n Find the template image and click it or another image area.\n\n Args:\n rec_template: \"Template\" instance to be found.\n touch_template: \"ImageTemplate\" instance to be clicked, defaults to None which means click rec_template.\n times: How many touches to be performed.\n timeout: Time interval to wait for the match.\n blind: Whether to recognize Template, same as parameter of touch().\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Returns:\n bool: Whether the target image appear and click it.\n \"\"\"\n try:\n pos = self.wait(rec_template, timeout=timeout, ocr_mode=ocr_mode, cls=cls)\n h = rec_template.height * rec_template.ratio()\n w = rec_template.width * rec_template.ratio() # actual height and width of target in screen\n pos = random_rectangle_point(pos, h, w)\n except TargetNotFoundError:\n return False\n\n if touch_template:\n self.touch(touch_template, times, blind, ocr_mode=ocr_mode, cls=cls)\n logger.info((f\"Click{pos} {times} times\" if times > 1 else f\"Click{pos}\") + f\" @{touch_template.name}\")\n else:\n self.touch(pos, times)\n logger.info((f\"Click{pos} {times} times\" if times > 1 else f\"Click{pos}\") + f\" @{rec_template.name}\")\n return True\n\n @staticmethod\n @logwrap\n def exists(v: Template, timeout: float = 0, ocr_mode: int = 0, cls: Type[Ocr] = Ocr) -> bool or tuple:\n \"\"\"\n Check whether given target exists on device screen\n\n Args:\n v: target to be checked\n timeout: time limit, default is 0 which means loop_find will only search once\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Returns:\n False if target is not found, otherwise returns the coordinates of the target\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n if exists(ImageTemplate(r\"tpl1606822430589.png\")):\n touch(ImageTemplate(r\"tpl1606822430589.png\"))\n\n Since ``exists()`` will return the coordinates,\n we can directly click on this return value to reduce one image search:\n\n pos = exists(ImageTemplate(r\"tpl1606822430589.png\"))\n if pos:\n touch(pos)\n \"\"\"\n try:\n pos = loop_find(v, timeout=timeout, ocr_mode=ocr_mode, cls=cls)\n except TargetNotFoundError:\n return False\n else:\n return pos\n\n 
@staticmethod\n @logwrap\n def wait(\n v: Template,\n timeout: float = None,\n interval: float = 0.5,\n interval_func: Callable = None,\n ocr_mode: int = 0,\n cls: Type[Ocr] = Ocr\n ) -> tuple:\n \"\"\"\n Wait to match the Template on the device screen\n\n Args:\n v: target object to wait for, Template instance\n timeout: time interval to wait for the match, default is None which is ``ST.FIND_TIMEOUT``\n interval: time interval in seconds to attempt to find a match\n interval_func: called after each unsuccessful attempt to find the corresponding match\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Raises:\n TargetNotFoundError: raised if target is not found after the time limit expired\n\n Returns:\n coordinates of the matched target\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n wait(Template(r\"tpl1606821804906.png\")) # timeout after ST.FIND_TIMEOUT\n # find Template every 3 seconds, timeout after 120 seconds\n wait(Template(r\"tpl1606821804906.png\"), timeout=120, interval=3)\n\n You can specify a callback function every time the search target fails::\n\n def notfound():\n print(\"No target found\")\n wait(Template(r\"tpl1607510661400.png\"), interval_func=notfound)\n \"\"\"\n if timeout is None:\n timeout = ST.FIND_TIMEOUT\n pos = loop_find(v, timeout=timeout, interval=interval, interval_func=interval_func, ocr_mode=ocr_mode, cls=cls)\n\n return pos\n\n @staticmethod\n def swipe(\n v1: Template or tuple,\n v2: Template or tuple = None,\n vector: tuple = None,\n blind1: bool = False,\n blind2: bool = False,\n **kwargs\n ) -> tuple:\n \"\"\"\n Perform the swipe action on the device screen.\n\n There are two ways of assigning the parameters\n * ``swipe(v1, v2=Template(...))`` # swipe from v1 to v2\n * ``swipe(v1, vector=(x, y))`` # swipe starts at v1 and moves along the vector.\n\n Args:\n v1: the start point of swipe, either a Template instance or absolute coordinates (x, y)\n v2: the end point of swipe, either a Template instance or absolute coordinates (x, y)\n vector: a vector coordinates of swipe action, either absolute coordinates (x, y) or percentage of\n screen e.g.(0.5, 0.5)\n blind1: Whether to recognize Template1, same as parameter of touch().\n blind2: Whether to recognize Template2, same as parameter of touch().\n **kwargs: platform specific `kwargs`, please refer to corresponding docs\n\n Raises:\n general exception when not enough parameters to perform swap action have been provided\n\n Returns:\n Origin position and target position\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n swipe(Template(r\"tpl1606814865574.png\"), vector=[-0.0316, -0.3311])\n swipe((100, 100), (200, 200))\n\n Custom swiping duration and number of steps(Android and iOS)::\n\n # swiping lasts for 1 second, divided into 6 steps\n swipe((100, 100), (200, 200), duration=1, steps=6)\n \"\"\"\n if isinstance(v1, Template):\n if blind1:\n pos1 = (v1.area[2] + v1.area[0]) / 2, (v1.area[3] + v1.area[1]) / 2\n else:\n pos1 = loop_find(v1, timeout=ST.FIND_TIMEOUT)\n else:\n try_log_screen()\n pos1 = v1\n\n if v2:\n if isinstance(v2, Template):\n if blind2:\n pos2 = (v2.area[2] + v2.area[0]) / 2, (v2.area[3] + v2.area[1]) / 2\n else:\n pos2 = loop_find(v2, timeout=ST.FIND_TIMEOUT_TMP)\n else:\n pos2 = v2\n elif vector:\n if vector[0] <= 1 and vector[1] <= 1:\n w, h = G.DEVICE.get_current_resolution()\n vector = (int(vector[0] * w), int(vector[1] * h))\n pos2 = (pos1[0] + vector[0], pos1[1] + vector[1])\n else:\n raise 
ScriptError(\"no enough params for swipe\")\n\n G.DEVICE.swipe(pos1, pos2, **kwargs)\n delay_after_operation()\n logger.info(f\"Swipe {pos1} -> {pos2}\")\n return pos1, pos2\n\n @staticmethod\n def screenshot():\n \"\"\"\n Returns:\n Screenshot image\n \"\"\"\n return G.DEVICE.snapshot(filename=None, quality=ST.SNAPSHOT_QUALITY)\n\n @staticmethod\n def snapshot(filename=None, msg=\"\", quality=None, max_size=None):\n \"\"\"\n Returns:\n {\"screen\": filename, \"resolution\": resolution of the screen} or None\n \"\"\"\n return snapshot(filename, msg, quality, max_size)\n\n @staticmethod\n def shell(cmd):\n return shell(cmd)\n\n @staticmethod\n def start_app(package, activity=None):\n start_app(package, activity)\n\n @staticmethod\n def clear_app(package):\n clear_app(package)\n\n @staticmethod\n def install(filepath, **kwargs):\n return install(filepath, **kwargs)\n\n @staticmethod\n def uninstall(package):\n return uninstall(package)\n\n @staticmethod\n def wake():\n wake()\n\n @staticmethod\n def home():\n home()\n\n @staticmethod\n def double_click(v):\n return double_click(v)\n\n @staticmethod\n def pinch(in_or_out='in', center=None, percent=0.5):\n pinch(in_or_out, center, percent)\n\n @staticmethod\n def key_event(keyname, **kwargs):\n keyevent(keyname, **kwargs)\n\n @staticmethod\n def text(txt, enter=True, **kwargs):\n text(txt, enter, **kwargs)\n\n @staticmethod\n def sleep(secs=1.0):\n sleep(secs)\n\n @staticmethod\n def find_all(v):\n return find_all(v)\n\n @staticmethod\n def get_clipboard(*args, **kwargs):\n return get_clipboard(*args, **kwargs)\n\n @staticmethod\n def set_clipboard(content, *args, **kwargs):\n set_clipboard(content, *args, **kwargs)" }, { "identifier": "Template", "path": "zafkiel/device/template.py", "snippet": "class ImageTemplate(Template):\n def __init__(\n self,\n filename: str,\n record_pos: tuple = None,\n keyword: Keyword = None,\n threshold: float = None,\n target_pos: int = TargetPos.MID,\n resolution: tuple = (1280, 720),\n rgb: bool = False,\n scale_max: int = 800,\n scale_step: float = 0.005,\n template_path: str = 'templates'\n ):\n def filepath(self) -> str:\n def name(self) -> str:\n def image(self) -> ndarray:\n def height(self) -> int:\n def width(self) -> int:\n def _has_border(self) -> bool:\n def ratio(self, screen_height: float = None) -> float:\n def area(self) -> tuple:" }, { "identifier": "Timer", "path": "zafkiel/timer.py", "snippet": "class Timer:\n def __init__(self, limit, count=0):\n \"\"\"\n From https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/base/timer.py\n\n Args:\n limit (int, float): Timer limit\n count (int): Timer reach confirm count. 
Default to 0.\n When using a structure like this, must set a count.\n Otherwise, it goes wrong if screenshot time cost greater than limit.\n\n if self.appear(MAIN_CHECK):\n if confirm_timer.reached():\n pass\n else:\n confirm_timer.reset()\n\n Also, It's a good idea to set `count`, to make program run more stable on slow computers.\n Expected speed is 0.35 second / screenshot.\n \"\"\"\n self.limit = limit\n self.count = count\n self._current = 0\n self._reach_count = count\n\n def start(self):\n if not self.started():\n self._current = time.time()\n self._reach_count = 0\n\n return self\n\n def started(self):\n return bool(self._current)\n\n def current(self):\n \"\"\"\n Returns:\n float\n \"\"\"\n if self.started():\n return time.time() - self._current\n else:\n return 0.\n\n def set_current(self, current, count=0):\n self._current = time.time() - current\n self._reach_count = count\n\n def reached(self):\n \"\"\"\n Returns:\n bool\n \"\"\"\n self._reach_count += 1\n return time.time() - self._current > self.limit and self._reach_count > self.count\n\n def reset(self):\n self._current = time.time()\n self._reach_count = 0\n return self\n\n def clear(self):\n self._current = 0\n self._reach_count = self.count\n return self\n\n def reached_and_reset(self):\n \"\"\"\n Returns:\n bool:\n \"\"\"\n if self.reached():\n self.reset()\n return True\n else:\n return False\n\n def wait(self):\n \"\"\"\n Wait until timer reached.\n \"\"\"\n diff = self._current + self.limit - time.time()\n if diff > 0:\n time.sleep(diff)\n\n def show(self):\n logger.info(str(self))\n\n def __str__(self):\n return f'Timer(limit={round(self.current(), 3)}/{self.limit}, count={self._reach_count}/{self.count})'\n\n __repr__ = __str__" }, { "identifier": "simple_report", "path": "zafkiel/report.py", "snippet": "def simple_report(filepath, log_path=True, logfile=None, output=HTML_FILE):\n path, name = script_dir_name(filepath)\n if log_path is True:\n log_path = os.path.join(path, getattr(Config, \"LOG_DIR\", DEFAULT_LOG_DIR))\n rpt = HtmlReport(path, log_path, logfile=logfile or getattr(Config, \"LOG_FILE\", DEFAULT_LOG_FILE), script_name=name)\n rpt.report(HTML_TPL, output_file=output)" }, { "identifier": "Keyword", "path": "zafkiel/ocr/keyword.py", "snippet": "class Keyword:\n cn: str = ''\n cht: str = ''\n en: str = ''\n jp: str = ''\n # id: int # To be considered\n name: str = ''\n\n \"\"\"\n Instance attributes and methods\n TODO: Error handling for missing attributes\n \"\"\"\n\n @cached_property\n def ch(self) -> str:\n return self.cn\n\n @cached_property\n def cn_parsed(self) -> str:\n return parse_name(self.cn)\n\n @cached_property\n def en_parsed(self) -> str:\n return parse_name(self.en)\n\n @cached_property\n def jp_parsed(self) -> str:\n return parse_name(self.jp)\n\n @cached_property\n def cht_parsed(self) -> str:\n return parse_name(self.cht)\n\n def __str__(self):\n keyword_list = []\n for keyword in [self.cn, self.cht, self.en, self.jp]:\n if keyword != '':\n keyword_list.append(keyword)\n return f\"{self.__class__.__name__}({self.name})->{'/'.join(keyword_list)}\"\n\n __repr__ = __str__\n\n def __eq__(self, other):\n return str(self) == str(other)\n\n def __hash__(self):\n return hash(self.name)\n\n def __bool__(self):\n return True\n\n def keywords_to_find(self, lang: str = None, ignore_punctuation: bool = True):\n if lang is None:\n lang = Config.SERVER_LANG\n\n # TODO: fix this refer to SRC\n if lang == 'cn':\n if ignore_punctuation:\n return [self.cn_parsed]\n else:\n return [self.cn]\n elif lang == 
'en':\n if ignore_punctuation:\n return [self.en_parsed]\n else:\n return [self.en]\n elif lang == 'jp':\n if ignore_punctuation:\n return [self.jp_parsed]\n else:\n return [self.jp]\n elif lang == 'cht':\n if ignore_punctuation:\n return [self.cht_parsed]\n else:\n return [self.cht]\n else:\n if ignore_punctuation:\n return [\n self.cn_parsed,\n self.en_parsed,\n self.jp_parsed,\n self.cht_parsed,\n ]\n else:\n return [\n self.cn,\n self.en,\n self.jp,\n self.cht,\n ]\n\n \"\"\"\n Class attributes and methods\n\n Note that dataclasses inherited `Keyword` must override `instances` attribute,\n or `instances` will still be a class attribute of base class.\n ```\n @dataclass\n class DungeonNav(Keyword):\n instances: ClassVar = {}\n ```\n \"\"\"\n # Key: instance name. Value: instance object.\n instances: ClassVar = {}\n\n def __post_init__(self):\n self.__class__.instances[self.name] = self\n\n @classmethod\n def _compare(cls, name, keyword):\n return name == keyword\n\n @classmethod\n def find(cls, name, lang: str = None, ignore_punctuation: bool = True):\n \"\"\"\n Args:\n name: Name in any server or instance id.\n lang: Lang to find from. None to search the names from current server only.\n ignore_punctuation: True to remove punctuations and turn into lowercase before searching.\n\n Returns:\n Keyword instance.\n\n Raises:\n ScriptError: If nothing found.\n \"\"\"\n # Already a keyword\n if isinstance(name, Keyword):\n return name\n\n # Probably a variable name\n if isinstance(name, str) and '_' in name:\n for instance in cls.instances.values():\n if name == instance.name:\n return instance\n # Probably an in-game name\n if ignore_punctuation:\n name = parse_name(name)\n else:\n name = str(name)\n instance: Keyword\n for instance in cls.instances.values():\n for keyword in instance.keywords_to_find(\n lang=lang, ignore_punctuation=ignore_punctuation):\n if cls._compare(name, keyword):\n return instance\n\n # Not found\n raise ScriptError(f'Cannot find a {cls.__name__} instance that matches \"{name}\"')" }, { "identifier": "Ocr", "path": "zafkiel/ocr/ocr.py", "snippet": "class Ocr:\n # Merge results with box distance <= thres\n merge_thres_x = 0\n merge_thres_y = 0\n\n def __init__(self, button: ImageTemplate, lang=None, name=None):\n \"\"\"\n Args:\n button:\n lang: If None, use in-game language\n name: If None, use button.name\n \"\"\"\n if lang is None:\n lang = Config.SERVER_LANG\n if name is None:\n name = button.name\n\n self.button: ImageTemplate = button\n self.lang: str = lang\n self.name: str = name\n\n @cached_property\n def model(self) -> TextSystem:\n return OCR_MODEL.get_by_lang(self.lang)\n\n @staticmethod\n def pre_process(image):\n \"\"\"\n To be overridden.\n \"\"\"\n return image\n\n @staticmethod\n def after_process(result):\n \"\"\"\n To be overridden.\n \"\"\"\n return result\n\n def format_result(self, result) -> str:\n \"\"\"\n To be overridden.\n \"\"\"\n return result\n\n def ocr_single_line(self, image):\n # pre process\n start_time = time.time()\n image = crop(image, self.button.area)\n image = self.pre_process(image)\n # ocr\n result, _ = self.model.ocr_single_line(image)\n # after proces\n result = self.after_process(result)\n result = self.format_result(result)\n\n cost_time = time.time() - start_time\n logger.debug(f'OCR <{self.name}> cost {cost_time:.2f}s: {result}')\n return result\n\n def filter_detected(self, result: BoxedResult) -> bool:\n \"\"\"\n Return False to drop result.\n To be overridden.\n \"\"\"\n return True\n\n def detect_and_ocr(self, 
image, direct_ocr=False) -> list[BoxedResult]:\n \"\"\"\n Args:\n image:\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n\n Returns:\n\n \"\"\"\n # pre process\n start_time = time.time()\n if not direct_ocr:\n image = crop(image, self.button.area)\n image = self.pre_process(image)\n # ocr\n results: list[BoxedResult] = self.model.detect_and_ocr(image)\n # after proces\n for result in results:\n if not direct_ocr:\n result.box += self.button.area[:2]\n result.box = tuple(corner2area(result.box))\n\n results = [result for result in results if self.filter_detected(result)]\n results = merge_buttons(results, thres_x=self.merge_thres_x, thres_y=self.merge_thres_y)\n for result in results:\n result.ocr_text = self.after_process(result.ocr_text)\n\n cost_time = time.time() - start_time\n logger.debug(f\"OCR <{self.name}> cost {cost_time:.2f}s: {', '.join([result.ocr_text for result in results])}\")\n return results\n\n @staticmethod\n def _match_result(\n result: str,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True,\n ignore_digit=True):\n \"\"\"\n Args:\n result (str):\n keyword_classes: A list of `Keyword` class or classes inherited `Keyword`\n\n Returns:\n If matched, return `Keyword` object or objects inherited `Keyword`\n If not match, return None\n \"\"\"\n if not isinstance(keyword_classes, list):\n keyword_classes = [keyword_classes]\n\n # Digits will be considered as the index of keyword\n if ignore_digit:\n if result.isdigit():\n return None\n\n # Try in current lang\n for keyword_class in keyword_classes:\n try:\n matched = keyword_class.find(\n result,\n lang=lang,\n ignore_punctuation=ignore_punctuation\n )\n return matched\n except ScriptError:\n continue\n\n return None\n\n def matched_single_line(\n self,\n image,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True\n ):\n \"\"\"\n Args:\n image: Image to detect\n keyword_classes: `Keyword` class or classes inherited `Keyword`, or a list of them.\n lang:\n ignore_punctuation:\n\n Returns:\n If matched, return `Keyword` object or objects inherited `Keyword`\n If not match, return None\n \"\"\"\n result = self.ocr_single_line(image)\n\n result = self._match_result(\n result,\n keyword_classes=keyword_classes,\n lang=lang,\n ignore_punctuation=ignore_punctuation,\n )\n\n logger.debug(f'<{self.name}> matched: {str(result)}')\n return result\n\n def _product_button(\n self,\n boxed_result: BoxedResult,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True,\n ignore_digit=True\n ) -> OcrResultButton:\n if not isinstance(keyword_classes, list):\n keyword_classes = [keyword_classes]\n\n matched_keyword = self._match_result(\n boxed_result.ocr_text,\n keyword_classes=keyword_classes,\n lang=lang,\n ignore_punctuation=ignore_punctuation,\n ignore_digit=ignore_digit,\n )\n button = OcrResultButton(boxed_result, matched_keyword)\n return button\n\n def matched_ocr(self, image, keyword_classes, direct_ocr=False) -> list[OcrResultButton]:\n \"\"\"\n Match all instances of 'keyword_classes' on the screen.\n\n Args:\n image: Screenshot\n keyword_classes: `Keyword` class or classes inherited `Keyword`, or a list of them.\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n\n Returns:\n List of matched OcrResultButton.\n OCR result which didn't matched known keywords will be dropped.\n \"\"\"\n results = self.detect_and_ocr(image, direct_ocr=direct_ocr)\n results = [self._product_button(result, keyword_classes) 
for result in results]\n results = [result for result in results if result.is_keyword_matched]\n\n if results:\n logger.debug(f\"<{self.name}> matched: {', '.join([str(result) for result in results])}\")\n # else:\n # logger.debug(f\"<{self.name}> matching failed\")\n return results\n\n def ocr_match_keyword(self, image, keyword_instance, direct_ocr=False, mode: int = OCR_EQUAL, threshold=0.75) \\\n -> list[OcrResultButton]:\n \"\"\"\n Match a specified keyword instance on the screen.\n\n Args:\n image: Screenshot\n keyword_instance: Instance of `Keyword` class or its subclass.\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n mode: Match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n threshold: Similarity threshold, default 0.75, only work when mode is OCR_SIMILAR.\n\n Returns:\n List of matched OcrResultButton or empty list.\n \"\"\"\n boxed_results = self.detect_and_ocr(image, direct_ocr=direct_ocr)\n final_results = []\n for boxed_result in boxed_results:\n for keyword in keyword_instance.keywords_to_find():\n if mode == OCR_EQUAL and boxed_result.ocr_text != keyword:\n continue\n elif mode == OCR_CONTAINS and keyword not in boxed_result.ocr_text:\n continue\n elif mode == OCR_SIMILAR:\n similarity = SequenceMatcher(None, boxed_result.ocr_text, keyword).ratio()\n if similarity < threshold:\n continue\n button = OcrResultButton(boxed_result, keyword_instance)\n final_results.append(button)\n\n if final_results:\n logger.debug(f\"<{self.name}> matched: {', '.join([str(result) for result in final_results])}\")\n # else:\n # logger.debug(f\"<{self.name}> matching failed\")\n return final_results" }, { "identifier": "Digit", "path": "zafkiel/ocr/ocr.py", "snippet": "class Digit(Ocr):\n def __init__(self, button: ImageTemplate, lang='en', name=None):\n super().__init__(button, lang=lang, name=name)\n\n def format_result(self, result) -> int:\n \"\"\"\n Returns:\n int:\n \"\"\"\n result = super().after_process(result)\n # logger.attr(name=self.name, text=str(result))\n\n res = re.search(r'(\\d+)', result)\n if res:\n return int(res.group(1))\n else:\n # logger.warning(f'No digit found in {result}')\n return 0" }, { "identifier": "DigitCounter", "path": "zafkiel/ocr/ocr.py", "snippet": "class DigitCounter(Ocr):\n def __init__(self, button: ImageTemplate, lang='en', name=None):\n super().__init__(button, lang=lang, name=name)\n\n def format_result(self, result) -> tuple[int, int, int]:\n \"\"\"\n Do OCR on a counter, such as `14/15`, and returns 14, 1, 15\n\n Returns:\n int:\n \"\"\"\n result = super().after_process(result)\n # logger.attr(name=self.name, text=str(result))\n\n res = re.search(r'(\\d+)/(\\d+)', result)\n if res:\n groups = [int(s) for s in res.groups()]\n current, total = int(groups[0]), int(groups[1])\n # current = min(current, total)\n return current, total - current, total\n else:\n # logger.warning(f'No digit counter found in {result}')\n return 0, 0, 0" }, { "identifier": "Duration", "path": "zafkiel/ocr/ocr.py", "snippet": "class Duration(Ocr):\n @classmethod\n def timedelta_regex(cls, lang):\n regex_str = {\n 'cn': r'^(?P<prefix>.*?)'\n r'((?P<days>\\d{1,2})\\s*天\\s*)?'\n r'((?P<hours>\\d{1,2})\\s*小时\\s*)?'\n r'((?P<minutes>\\d{1,2})\\s*分钟\\s*)?'\n r'((?P<seconds>\\d{1,2})\\s*秒)?'\n r'(?P<suffix>[^天时钟秒]*?)$',\n 'en': r'^(?P<prefix>.*?)'\n r'((?P<days>\\d{1,2})\\s*d\\s*)?'\n r'((?P<hours>\\d{1,2})\\s*h\\s*)?'\n r'((?P<minutes>\\d{1,2})\\s*m\\s*)?'\n r'((?P<seconds>\\d{1,2})\\s*s)?'\n r'(?P<suffix>[^dhms]*?)$'\n 
}[lang]\n return re.compile(regex_str)\n\n def after_process(self, result):\n result = super().after_process(result)\n result = result.strip('.,。,')\n result = result.replace('Oh', '0h').replace('oh', '0h')\n return result\n\n def format_result(self, result: str) -> timedelta:\n \"\"\"\n Do OCR on a duration, such as `18d 2h 13m 30s`, `2h`, `13m 30s`, `9s`\n\n Returns:\n timedelta:\n \"\"\"\n matched = self.timedelta_regex(self.lang).search(result)\n if not matched:\n return timedelta()\n days = self._sanitize_number(matched.group('days'))\n hours = self._sanitize_number(matched.group('hours'))\n minutes = self._sanitize_number(matched.group('minutes'))\n seconds = self._sanitize_number(matched.group('seconds'))\n return timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)\n\n @staticmethod\n def _sanitize_number(number) -> int:\n if number is None:\n return 0\n return int(number)" }, { "identifier": "OcrResultButton", "path": "zafkiel/ocr/ocr.py", "snippet": "class OcrResultButton:\n def __init__(self, boxed_result: BoxedResult, matched_keyword: Optional[Keyword]):\n \"\"\"\n Args:\n boxed_result: BoxedResult from ppocr-onnx\n matched_keyword: Keyword object or None\n \"\"\"\n self.area = boxed_result.box\n self.search = area_pad(self.area, pad=-20)\n # self.button = boxed_result.box\n\n if matched_keyword is not None:\n self.matched_keyword = matched_keyword\n self.name = str(matched_keyword)\n else:\n self.matched_keyword = None\n self.name = boxed_result.ocr_text\n\n self.text = boxed_result.ocr_text\n self.score = boxed_result.score\n\n @property\n def is_keyword_matched(self) -> bool:\n return self.matched_keyword is not None\n\n def __str__(self):\n return self.name\n\n __repr__ = __str__\n\n def __eq__(self, other):\n return str(self) == str(other)\n\n def __hash__(self):\n return hash(self.name)\n\n def __bool__(self):\n return True" }, { "identifier": "Page", "path": "zafkiel/ui/page.py", "snippet": "class Page:\n \"\"\"\n Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/page.py\n \"\"\"\n\n # Key: str, page name like \"page_main\"\n # Value: Page, page instance\n all_pages = {}\n\n @classmethod\n def clear_connection(cls):\n for page in cls.all_pages.values():\n page.parent = None\n\n @classmethod\n def init_connection(cls, destination: Page):\n \"\"\"Initialize an A* path finding among pages.\n\n Args:\n destination:\n \"\"\"\n cls.clear_connection()\n\n visited = [destination]\n visited = set(visited)\n while True:\n new = visited.copy()\n for page in visited:\n for link in cls.iter_pages():\n if link in visited:\n continue\n if page in link.links:\n link.parent = page\n new.add(link)\n if len(new) == len(visited):\n break\n visited = new\n\n @classmethod\n def iter_pages(cls, start_page: Page = None):\n pages = list(cls.all_pages.values())\n if start_page is not None and start_page in pages:\n # Move start_page to the front of the list\n pages.remove(start_page)\n pages.insert(0, start_page)\n cls.all_pages = {page.name: page for page in pages}\n return cls.all_pages.values()\n\n @classmethod\n def iter_check_buttons(cls):\n for page in cls.all_pages.values():\n yield page.check_button\n\n def __init__(self, check_button: Template, switch: Switch = None):\n self.check_button = check_button\n self.switch = switch\n self.links = {}\n (filename, line_number, function_name, text) = traceback.extract_stack()[-2]\n self.name = text[:text.find('=')].strip()\n self.parent = None\n Page.all_pages[self.name] = self\n\n def 
__eq__(self, other):\n return self.name == other.name\n\n def __hash__(self):\n return hash(self.name)\n\n def __str__(self):\n return self.name\n\n __repr__ = __str__\n\n def link(self, button: Template, destination: Page):\n self.links[destination] = button" }, { "identifier": "Switch", "path": "zafkiel/ui/switch.py", "snippet": "class Switch:\n \"\"\"\n A wrapper to handle switches in game, switch among states with retries.\n Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py\n\n Examples:\n # Definitions\n submarine_hunt = Switch('Submarine_hunt', offset=120)\n submarine_hunt.add_state('on', check_button=Template(r\"assets/ON.png\"))\n submarine_hunt.add_state('off', check_button=Template(r\"assets/OFF.png\"))\n\n # Change state to ON\n submarine_view.set(TPL_ON)\n \"\"\"\n\n def __init__(self, name: str = 'Switch', is_selector: bool = False):\n \"\"\"\n Args:\n name:\n is_selector: True if this is a multi choice, click to choose one of the switches.\n For example: | [Daily] | Urgent | -> click -> | Daily | [Urgent] |\n False if this is a switch, click the switch itself, and it changed in the same position.\n For example: | [ON] | -> click -> | [OFF] |\n \"\"\"\n self.name = name\n self.is_choice = is_selector\n self.state_list = []\n\n def __str__(self):\n return self.name\n\n __repr__ = __str__\n\n def add_state(self, state: str, check_button: Template, click_button: Template = None):\n \"\"\"\n Args:\n state: Must match check_button.name\n check_button:\n click_button:\n \"\"\"\n self.state_list.append({\n 'state': state,\n 'check_button': check_button,\n 'click_button': click_button if click_button is not None else check_button,\n })\n\n def get_data(self, state: Template) -> dict:\n \"\"\"\n Args:\n state:\n\n Returns:\n Dictionary in add_state\n\n Raises:\n ScriptError: If state invalid\n \"\"\"\n for row in self.state_list:\n if row['state'] == state.name:\n return row\n\n raise ScriptError(f'Switch {self.name} received an invalid state {state}')" }, { "identifier": "UI", "path": "zafkiel/ui/ui.py", "snippet": "class UI(API):\n \"\"\"\n Processing interface related functions.\n Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/ui.py\n and https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py\n \"\"\"\n\n # Make ui_current mutable so that it can be shared among subclasses of the UI class.\n ui_current: dict = {'page': None}\n popup_list: list = []\n\n def ui_switch_appear(self, switch: Switch) -> bool:\n \"\"\"\n Args:\n switch:\n \"\"\"\n if self.ui_get_current_page().switch != switch:\n return False\n\n for data in switch.state_list:\n if self.exists(data['check_button']):\n return True\n return False\n\n def ui_get_current_state(self, switch: Switch) -> str:\n \"\"\"\n Args:\n switch:\n\n Returns:\n state name or 'unknown'.\n \"\"\"\n if self.ui_current['page'].switch != switch:\n logger.warning(f\"{self.ui_current['page']} does not have {switch}\")\n return 'unknown'\n\n for data in switch.state_list:\n if self.exists(data['check_button']):\n return data['state']\n return 'unknown'\n\n def ui_page_appear(self, page: Page, timeout: float = 0) -> bool or tuple:\n \"\"\"\n Args:\n page:\n timeout: Seconds to find.\n\n Returns:\n If found, return tuple of (x, y), else return False.\n \"\"\"\n return self.exists(page.check_button, timeout)\n\n def ui_get_current_page(self):\n \"\"\"\n Returns:\n Page:\n\n Raises:\n NotRunningError:\n PageUnknownError:\n \"\"\"\n\n @run_once\n 
def app_check():\n if not self.app_is_running():\n raise NotRunningError(\"Game not running\")\n\n timeout = Timer(10, count=20).start()\n while True:\n\n # End\n if timeout.reached():\n break\n\n # Known pages\n for page in Page.iter_pages():\n if page.check_button is None:\n continue\n if self.ui_page_appear(page=page):\n self.ui_current['page'] = page\n return page\n\n # Unknown page but able to handle\n if self.ui_additional():\n timeout.reset()\n continue\n\n app_check()\n\n # Unknown page, need manual switching\n raise PageUnknownError\n\n def _set_state(self, switch: Switch, state: Template) -> bool:\n counter = 0\n changed = False\n warning_show_timer = Timer(5, count=10).start()\n click_timer = Timer(1, count=3)\n while True:\n\n # Detect\n current = self.ui_get_current_state(switch)\n\n # End\n if current == state.name:\n logger.info(f'{switch.name} set to {state.name}')\n return changed\n\n # Warning\n if current == 'unknown':\n if self.ui_additional():\n continue\n if warning_show_timer.reached():\n logger.warning(f'Unknown {switch.name} switch')\n warning_show_timer.reset()\n if counter >= 1:\n logger.warning(\n f'{switch.name} switch {state.name} asset has evaluated to unknown too many times, '\n f'asset should be re-verified')\n return False\n counter += 1\n continue\n\n # Click\n if click_timer.reached():\n click_state = state if switch.is_choice else current\n button = switch.get_data(click_state)['click_button']\n self.touch(button)\n click_timer.reset()\n changed = True\n\n return changed\n\n def ui_goto(self, destination: Page, state: Template = None):\n \"\"\"\n Args:\n destination:\n state: Target state of switch, which must be in destination page.\n \"\"\"\n\n # check if state is valid\n if state is not None:\n if destination.switch is None:\n raise ScriptError(f'Page {destination} has no switch')\n destination.switch.get_data(state)\n\n logger.debug(f\"------ UI GOTO {str(destination).upper()}:{state.name.upper()} ------\")\n else:\n logger.debug(f\"------ UI GOTO {str(destination).upper()} ------\")\n\n # Create connection\n Page.init_connection(destination)\n\n while True:\n\n # Destination page\n if self.ui_page_appear(destination, timeout=0.5):\n self.ui_current['page'] = destination\n logger.debug(f'Page arrive: {destination}')\n if state is not None:\n self._set_state(destination.switch, state)\n break\n\n # Other pages\n clicked = False\n for page in Page.iter_pages(start_page=self.ui_current['page']):\n if page.parent is None or page.check_button is None:\n continue\n if self.exists(page.check_button):\n self.ui_current['page'] = page\n button = page.links[page.parent]\n self.touch(button)\n logger.info(f'Page switch: {page} -> {page.parent}')\n clicked = True\n break\n if clicked:\n continue\n\n # Additional\n if self.ui_additional():\n continue\n\n # Reset connection\n Page.clear_connection()\n\n def ui_ensure(self, destination: Page, state: Template = None) -> bool:\n \"\"\"\n Args:\n destination:\n state: Target state of switch, which must be in destination page.\n\n Returns:\n bool: If UI switched.\n \"\"\"\n self.ui_get_current_page()\n\n if self.ui_current['page'] == destination:\n if state is not None:\n if self.ui_get_current_state(destination.switch) == state.name:\n logger.debug(f\"Arrived at {destination}:{state.name}\")\n return False\n else:\n self._set_state(destination.switch, state)\n return True\n else:\n logger.debug(f\"Already at {destination}\")\n return False\n else:\n self.ui_goto(destination, state)\n return True\n\n def 
ui_ensure_index(\n self,\n index: int,\n letter: Ocr or callable,\n next_button: Template,\n prev_button: Template,\n fast: bool = True,\n interval: float = 0.2\n ):\n \"\"\"\n For pages with similar layout, ensure index of target page.\n\n Args:\n index: Index of target page.\n letter: OCR button.\n next_button:\n prev_button:\n fast: Default true. False when index is not continuous.\n interval: Seconds between two click.\n \"\"\"\n retry = Timer(1, count=2)\n while True:\n if isinstance(letter, Ocr):\n current = letter.ocr_single_line(self.screenshot())\n else:\n current = letter(self.screenshot())\n\n logger.info(f\"{self.ui_current['page']}: Index {current}\")\n diff = index - current\n if diff == 0:\n break\n if current == 0:\n logger.warning(f'ui_ensure_index got an empty current value: {current}')\n continue\n\n if retry.reached():\n button = next_button if diff > 0 else prev_button\n if fast:\n self.touch(button, times=abs(diff), interval=interval)\n else:\n self.touch(button)\n retry.reset()\n\n def get_popup_list(self, popups: list):\n \"\"\"\n Get list from program, must be called before self.ui_additional().\n\n Args:\n popups: list of handle popup functions\n \"\"\"\n for popup in popups:\n self.popup_list.append(popup)\n\n def ui_additional(self) -> bool:\n \"\"\"\n Handle all possible popups during UI switching.\n\n Returns:\n If handled any popup.\n \"\"\"\n for popup in self.popup_list:\n if popup():\n return True\n\n return False\n\n def to_json(self) -> dict:\n # May not be actual current page\n return {'ui_current': str(self.ui_current['page'])}" } ]
from zafkiel import API, Template, logger, Timer, simple_report, Config
from zafkiel.ocr import Keyword, Ocr, Digit, DigitCounter, Duration, OcrResultButton
from zafkiel.ui import Page, Switch, UI
12,428
# Auto import test Keyword Ocr Digit DigitCounter Duration OcrResultButton Page Switch UI
# Auto import test Keyword Ocr Digit DigitCounter Duration OcrResultButton Page Switch UI
API
2
2023-11-12 09:33:35+00:00
16k
doodledood/chat-flock
examples/chatgpt_clone_with_additional_tools.py
[ { "identifier": "InMemoryChatDataBackingStore", "path": "chatflock/backing_stores/in_memory.py", "snippet": "class InMemoryChatDataBackingStore(ChatDataBackingStore):\n messages: List[ChatMessage]\n participants: Dict[str, ChatParticipant]\n last_message_id: Optional[int] = None\n\n def __init__(\n self, messages: Optional[List[ChatMessage]] = None, participants: Optional[List[ChatParticipant]] = None\n ):\n self.messages = messages or []\n self.participants = {participant.name: participant for participant in (participants or [])}\n self.last_message_id = None if len(self.messages) == 0 else self.messages[-1].id\n\n def get_messages(self) -> List[ChatMessage]:\n return self.messages\n\n def add_message(self, sender_name: str, content: str, timestamp: Optional[datetime.datetime] = None) -> ChatMessage:\n self.last_message_id = self.last_message_id + 1 if self.last_message_id is not None else 1\n\n message = ChatMessage(\n id=self.last_message_id,\n sender_name=sender_name,\n content=content,\n timestamp=timestamp or datetime.datetime.now(),\n )\n\n self.messages.append(message)\n\n return message\n\n def clear_messages(self):\n self.messages = []\n self.last_message_id = None\n\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n participants = list(self.participants.values())\n active_participants = [\n participant for participant in participants if isinstance(participant, ActiveChatParticipant)\n ]\n\n return active_participants\n\n def get_non_active_participants(self) -> List[ChatParticipant]:\n participants = list(self.participants.values())\n participants = [\n participant for participant in participants if not isinstance(participant, ActiveChatParticipant)\n ]\n\n return participants\n\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n if name not in self.participants:\n return None\n\n participant = self.participants[name]\n if not isinstance(participant, ActiveChatParticipant):\n return None\n\n return participant\n\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n if name not in self.participants:\n return None\n\n participant = self.participants[name]\n if isinstance(participant, ActiveChatParticipant):\n return None\n\n return participant\n\n def add_participant(self, participant: ChatParticipant) -> None:\n if participant.name in self.participants:\n raise ChatParticipantAlreadyJoinedToChatError(participant.name)\n\n self.participants[participant.name] = participant\n\n def remove_participant(self, participant: ChatParticipant) -> None:\n if participant.name not in self.participants:\n raise ChatParticipantNotJoinedToChatError(participant.name)\n\n self.participants.pop(participant.name)\n\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n if participant_name in self.participants:\n participant = self.participants[participant_name]\n return isinstance(participant, ActiveChatParticipant)\n\n return False\n\n def has_non_active_participant_with_name(self, participant_name: str) -> bool:\n if participant_name in self.participants:\n participant = self.participants[participant_name]\n return not isinstance(participant, ActiveChatParticipant)\n\n return False" }, { "identifier": "Chat", "path": "chatflock/base.py", "snippet": "class Chat:\n backing_store: ChatDataBackingStore\n renderer: ChatRenderer\n name: Optional[str] = None\n max_total_messages: Optional[int] = None\n hide_messages: bool = False\n\n def __init__(\n self,\n backing_store: 
ChatDataBackingStore,\n renderer: ChatRenderer,\n initial_participants: Optional[Sequence[ChatParticipant]] = None,\n name: Optional[str] = None,\n max_total_messages: Optional[int] = None,\n hide_messages: bool = False,\n ):\n if max_total_messages is not None and max_total_messages <= 0:\n raise ValueError(\"Max total messages must be None or greater than 0.\")\n\n self.backing_store = backing_store\n self.renderer = renderer\n self.name = name\n self.hide_messages = hide_messages\n self.max_total_messages = max_total_messages\n\n for i, participant in enumerate(initial_participants or []):\n self.add_participant(participant)\n\n def add_participant(self, participant: ChatParticipant) -> None:\n if self.has_active_participant_with_name(participant.name) or self.has_non_active_participant_with_name(\n participant.name\n ):\n raise ChatParticipantAlreadyJoinedToChatError(participant.name)\n\n self.backing_store.add_participant(participant)\n\n all_participants = (\n self.backing_store.get_active_participants() + self.backing_store.get_non_active_participants()\n )\n for participant in all_participants:\n participant.on_participant_joined_chat(chat=self, participant=participant)\n\n def remove_participant(self, participant: ChatParticipant) -> None:\n self.backing_store.remove_participant(participant)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_participant_left_chat(chat=self, participant=participant)\n\n def add_message(self, sender_name: str, content: str) -> None:\n sender = self.backing_store.get_active_participant_by_name(sender_name)\n if sender is None:\n raise ChatParticipantNotJoinedToChatError(sender_name)\n\n message = self.backing_store.add_message(sender_name=sender_name, content=content)\n\n self.renderer.render_new_chat_message(chat=self, message=message)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_new_chat_message(chat=self, message=message)\n\n def get_messages(self) -> List[ChatMessage]:\n return self.backing_store.get_messages()\n\n def clear_messages(self):\n self.backing_store.clear_messages()\n\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n return self.backing_store.get_active_participants()\n\n def get_non_active_participants(self) -> List[ChatParticipant]:\n return self.backing_store.get_non_active_participants()\n\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n return self.backing_store.get_active_participant_by_name(name=name)\n\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n return self.backing_store.get_non_active_participant_by_name(name=name)\n\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n return self.backing_store.has_active_participant_with_name(participant_name=participant_name)\n\n def has_non_active_participant_with_name(self, participant_name: str) -> bool:\n return self.backing_store.has_non_active_participant_with_name(participant_name=participant_name)\n\n @property\n def active_participants_str(self):\n return \"\\n\\n\".join([participant.detailed_str() for 
participant in self.get_active_participants()])" }, { "identifier": "LocalCodeExecutor", "path": "chatflock/code/local.py", "snippet": "class LocalCodeExecutor(CodeExecutor):\n def __init__(self, spinner: Optional[Halo] = None):\n self.spinner = spinner\n\n def execute(self, code: str, dependencies: Optional[Sequence[str]] = None) -> str:\n captured_output = io.StringIO()\n saved_stdout = sys.stdout\n sys.stdout = captured_output\n\n # Install dependencies before executing code using pip\n if dependencies is not None:\n if self.spinner is not None:\n self.spinner.start(\"🐍 Installing dependencies...\")\n\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", *dependencies]\n ) # nosec - Acknowledged that this is dangerous code execution, we have to use it, though.\n\n if self.spinner is not None:\n self.spinner.stop_and_persist(symbol=\"🐍\", text=\"Dependencies installed.\")\n\n local_vars: Dict[str, Any] = {}\n\n if self.spinner is not None:\n self.spinner.start(\"🐍 Executing code...\")\n\n try:\n for line in code.splitlines(keepends=False):\n if not line:\n continue\n\n exec(\n code, None, local_vars\n ) # nosec - Acknowledged that this is dangerous code execution, we have to use it, though.\n except:\n return f\"Error executing code: {traceback.format_exc()}\"\n finally:\n sys.stdout = saved_stdout\n\n if self.spinner is not None:\n self.spinner.stop_and_persist(symbol=\"🐍\", text=\"Code executed.\")\n\n res = captured_output.getvalue()\n\n return res" }, { "identifier": "CodeExecutionTool", "path": "chatflock/code/langchain.py", "snippet": "class CodeExecutionTool(BaseTool):\n executor: CodeExecutor\n name: str = \"code_executor\"\n description: str = (\n \"Use this for any capability you are missing that you think some python code will solve. That \"\n \"includes math, time, data analysis, etc. Code will get executed and the result will be \"\n \"returned as a string. 
Please specify dependencies if you want to use them in code.\"\n )\n args_schema: Type[pydantic_v1.BaseModel] = CodeExecutionToolArgs\n progress_text: str = \"🐍 Executing code...\"\n spinner: Optional[Halo] = None\n\n def _run(\n self,\n python_code: str,\n dependencies: Optional[List[str]] = None,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n if self.spinner is not None:\n self.spinner.stop_and_persist(\n symbol=\"🐍\", text=\"Will execute the following code:\\n```\\n\" + python_code + \"\\n```\"\n )\n self.spinner.start(self.progress_text)\n\n res = self.executor.execute(code=python_code, dependencies=dependencies)\n\n return res" }, { "identifier": "RoundRobinChatConductor", "path": "chatflock/conductors/round_robin.py", "snippet": "class RoundRobinChatConductor(ChatConductor):\n def select_next_speaker(self, chat: Chat) -> Optional[ActiveChatParticipant]:\n active_participants = chat.get_active_participants()\n if len(active_participants) <= 0:\n return None\n\n messages = chat.get_messages()\n last_message = messages[-1] if len(messages) > 0 else None\n\n if last_message is not None and self.is_termination_message(last_message):\n return None\n\n last_speaker = last_message.sender_name if last_message is not None else None\n if last_speaker is None:\n return next(iter(active_participants))\n\n # Rotate to the next participant in the list.\n participant_names = [participant.name for participant in active_participants]\n\n if last_speaker not in participant_names:\n next_speaker_name = participant_names[0]\n else:\n last_speaker_index = participant_names.index(last_speaker)\n next_speaker_index = (last_speaker_index + 1) % len(participant_names)\n next_speaker_name = participant_names[next_speaker_index]\n\n next_speaker = chat.get_active_participant_by_name(next_speaker_name)\n if next_speaker is None or not isinstance(next_speaker, ActiveChatParticipant):\n raise ChatParticipantNotJoinedToChatError(next_speaker_name)\n\n return next_speaker\n\n def get_chat_result(self, chat: \"Chat\") -> str:\n result = super().get_chat_result(chat=chat)\n\n try:\n idx = result.rindex(\"TERMINATE\")\n result = result[:idx].strip()\n except ValueError:\n result = result.strip()\n\n return result\n\n def is_termination_message(self, message: ChatMessage) -> bool:\n return message.content.strip().endswith(\"TERMINATE\")" }, { "identifier": "LangChainBasedAIChatParticipant", "path": "chatflock/participants/langchain.py", "snippet": "class LangChainBasedAIChatParticipant(ActiveChatParticipant):\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(\n self,\n name: str,\n chat_model: BaseChatModel,\n symbol: str = \"🤖\",\n role: str = \"AI Assistant\",\n personal_mission: str = \"Be a helpful AI assistant.\",\n other_prompt_sections: Optional[List[Section]] = None,\n retriever: Optional[BaseRetriever] = None,\n tools: Optional[List[BaseTool]] = None,\n chat_model_args: Optional[Dict[str, Any]] = None,\n spinner: Optional[Halo] = None,\n ignore_group_chat_environment: bool = False,\n include_timestamp_in_messages: bool = False,\n **kwargs: Any,\n ):\n super().__init__(name=name, symbol=symbol, **kwargs)\n\n self.role = role\n self.chat_model = chat_model\n self.chat_model_args = chat_model_args or {}\n self.other_prompt_sections = other_prompt_sections or []\n self.ignore_group_chat_environment = ignore_group_chat_environment\n self.include_timestamp_in_messages = include_timestamp_in_messages\n self.retriever = retriever\n self.tools = tools\n 
self.spinner = spinner\n self.personal_mission = personal_mission\n\n def create_system_message(self, chat: \"Chat\", relevant_docs: Sequence[Document]) -> str:\n now = datetime.now()\n pretty_datetime = now.strftime(\"%m-%d-%Y %H:%M:%S\")\n\n base_sections = [\n Section(name=\"Current Time\", text=pretty_datetime),\n Section(name=\"Name\", text=self.name),\n Section(name=\"Role\", text=self.role),\n Section(name=\"Personal Mission\", text=self.personal_mission),\n Section(\n name=\"Additional Context for Response\",\n text=\"None\"\n if len(relevant_docs) == 0\n else \"The following documents may be relevant for your response, only use \"\n \"them for context for a better response, if applicable\",\n sub_sections=[\n Section(name=f\"Document {i + 1}\", text=f\"```{doc.page_content}```\")\n for i, doc in enumerate(relevant_docs)\n ],\n ),\n Section(\n name=\"Response Message Format\",\n list=[\n \"Your response should be the message you want to send to the group chat as your own name, \"\n \"role, and personal mission.\",\n \"Must not include any prefix (e.g., timestamp, sender name, etc.).\",\n \"Response must be a message as will be shown in the chat (timestamp and sender name are \"\n \"system-generated for you).\",\n ],\n sub_sections=[\n Section(name=\"Well-Formatted Chat Response Examples\", list=['\"Hello, how are you?\"']),\n Section(\n name=\"Badly-Formatted Chat Response Examples\",\n list=[\n (\n '\"[TIMESTAMP] John: Hello, how are you?\"'\n if self.include_timestamp_in_messages\n else '\"John: Hello, how are you?\"'\n ),\n ],\n ),\n ],\n ),\n ]\n\n active_participants = chat.get_active_participants()\n if self.ignore_group_chat_environment:\n system_message = StructuredString(sections=[*base_sections, *self.other_prompt_sections])\n else:\n system_message = StructuredString(\n sections=[\n *base_sections,\n Section(\n name=\"Chat\",\n sub_sections=[\n Section(name=\"Name\", text=chat.name or \"No name provided. Just a general chat.\"),\n Section(\n name=\"Participants\",\n text=\"\\n\".join(\n [\n f'- {str(p)}{\" -> This is you.\" if p.name == self.name else \"\"}'\n for p in active_participants\n ]\n ),\n ),\n Section(\n name=\"Guidelines\",\n list=[\n \"Your personal mission is the most important thing to you. You should always \"\n \"prioritize it.\",\n \"If a chat goal is provided, you should still follow your personal mission but \"\n \"in a way that helps the group achieve the chat goal.\",\n \"If you are the only participant in the chat, you should act as if the chat is now \"\n \"a scratch pad for you to write down your thoughts, ideas, and work on your \"\n \"mission by yourself. \"\n \"In the messages do not refer to another entity, but rather to yourself \"\n \"(I instead of You); the messages should read and sound like \"\n \"your internal thoughts and should be succinct, unless they are concrete work \"\n \"(for example, implementing something, calculating things, etc.). \"\n \"You have all the time in the world to build your thoughts, ideas, and do the \"\n \"work needed. The chat is now your place to think and iterate on your mission and \"\n \" achieve it.\",\n ],\n ),\n Section(\n name=\"Rules\",\n list=[\n \"You do not have to respond directly to the one who sent you a message. You can respond \"\n \"to anyone in the group chat.\",\n \"You cannot have private conversations with other participants. 
Everyone can see all \"\n \"messages sent by all other participants.\",\n ],\n ),\n Section(\n name=\"Previous Chat Messages\",\n list=[\n \"Messages are prefixed by a timestamp and the sender's name (could also be everyone). \",\n \"The prefix is for context only; it's not actually part of the message they sent. \",\n (\n 'Example: \"[TIMESTAMP] John: Hello, how are you?\"'\n if self.include_timestamp_in_messages\n else 'Example: \"John: Hello, how are you?\"'\n ),\n \"Some messages could have been sent by participants who are no longer a part of this \"\n \"conversation. Use their contents for context only; do not talk to them.\",\n \"In your response only include the message without the prefix.\",\n \"If you are the only participant in the chat, the previous chat messages are your \"\n \" memories or internal thoughts instead.\",\n ],\n ),\n ],\n ),\n *self.other_prompt_sections,\n ]\n )\n\n return str(system_message)\n\n def chat_messages_to_chat_model_messages(\n self, chat_messages: Sequence[ChatMessage], active_participants: Sequence[ActiveChatParticipant]\n ) -> List[BaseMessage]:\n messages: List[BaseMessage] = []\n for i, message in enumerate(chat_messages):\n if self.include_timestamp_in_messages:\n pretty_datetime = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n content = f\"[{pretty_datetime}] \"\n else:\n content = \"\"\n\n if self.ignore_group_chat_environment:\n content += f\"{message.sender_name}: {message.content}\"\n else:\n content += message.content\n\n if message.sender_name == self.name:\n if len(active_participants) > 1 or i == len(active_participants) - 1:\n messages.append(AIMessage(content=content))\n else:\n messages.append(HumanMessage(content=content))\n else:\n messages.append(HumanMessage(content=content))\n\n if len(messages) == 0:\n messages.append(HumanMessage(content=f\"SYSTEM: The chat has started.\"))\n\n return messages\n\n def respond_to_chat(self, chat: Chat) -> str:\n if self.spinner is not None:\n self.spinner.start(text=f\"{str(self)} is thinking...\")\n\n chat_messages = chat.get_messages()\n\n if self.retriever is not None and len(chat_messages) > 0:\n relevant_docs = self.get_relevant_docs(messages=chat_messages)\n else:\n relevant_docs = []\n\n system_message = self.create_system_message(chat=chat, relevant_docs=relevant_docs)\n\n active_participants = chat.get_active_participants()\n all_messages = self.chat_messages_to_chat_model_messages(chat_messages, active_participants)\n all_messages = [SystemMessage(content=system_message), *all_messages]\n\n message_content = self.execute_messages(messages=all_messages)\n\n if self.spinner is not None:\n self.spinner.stop()\n\n potential_prefix = f\"{self.name}:\"\n if message_content.startswith(potential_prefix):\n message_content = message_content[len(potential_prefix) :].strip()\n\n return message_content\n\n def get_relevant_docs(self, messages: Sequence[ChatMessage]) -> List[Document]:\n if self.retriever is None:\n return []\n\n return self.retriever.get_relevant_documents(query=messages[-1].content)\n\n def execute_messages(self, messages: Sequence[BaseMessage]) -> str:\n return execute_chat_model_messages(\n messages=messages,\n chat_model=self.chat_model,\n tools=self.tools,\n spinner=self.spinner,\n chat_model_args=self.chat_model_args,\n )\n\n def __str__(self) -> str:\n return f\"{self.symbol} {self.name} ({self.role})\"\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n\n tool_names = \", \".join([tool.name for tool in self.tools or []])\n if tool_names == 
\"\":\n tool_names = \"None\"\n\n return (\n f\"{prefix}- Name: {self.name}\\n{prefix} Role: {self.role}\\n{prefix} Symbol: {self.symbol}\\n\"\n f'{prefix} Personal Mission: \"{self.personal_mission}\"\\n{prefix} Tools: {tool_names}'\n )" }, { "identifier": "UserChatParticipant", "path": "chatflock/participants/user.py", "snippet": "class UserChatParticipant(ActiveChatParticipant):\n def __init__(self, name: str = \"User\", role: str = \"User\", symbol: str = \"👤\", **kwargs: Any):\n super().__init__(name, messages_hidden=True, **kwargs)\n\n self.role = role\n self.symbol = symbol\n\n def respond_to_chat(self, chat: Chat) -> str:\n return input(f\"{self.symbol} ({self.name}): \")\n\n def __str__(self) -> str:\n return f\"{self.symbol} {self.name} ({self.role})\"\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n return f\"{prefix}- Name: {self.name}\\n{prefix} Role: {self.role}\\n{prefix} Symbol: {self.symbol}\"" }, { "identifier": "TerminalChatRenderer", "path": "chatflock/renderers/terminal.py", "snippet": "class TerminalChatRenderer(ChatRenderer):\n def __init__(self, print_timestamps: bool = False):\n self.print_timestamps = print_timestamps\n\n def render_new_chat_message(self, chat: Chat, message: ChatMessage) -> None:\n if chat.hide_messages:\n return\n\n pretty_timestamp_with_date = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n\n sender = chat.get_active_participant_by_name(message.sender_name)\n if sender is None:\n symbol = \"❓\"\n\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {symbol} {message.sender_name}: {message.content}\")\n else:\n print(f\"{symbol} {message.sender_name}: {message.content}\")\n else:\n if sender.messages_hidden:\n return\n\n if chat.name is None:\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {str(sender)}: {message.content}\")\n else:\n print(f\"{str(sender)}: {message.content}\")\n else:\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {chat.name} > {str(sender)}: {message.content}\")\n else:\n print(f\"{chat.name} > {str(sender)}: {message.content}\")" }, { "identifier": "WebSearch", "path": "chatflock/web_research/web_research.py", "snippet": "class WebSearch:\n def __init__(\n self,\n chat_model: BaseChatModel,\n search_results_provider: SearchResultsProvider,\n page_query_analyzer: PageQueryAnalyzer,\n skip_results_if_answer_snippet_found: bool = True,\n ):\n self.chat_model = chat_model\n self.search_results_provider = search_results_provider\n self.page_query_analyzer = page_query_analyzer\n self.skip_results_if_answer_snippet_found = skip_results_if_answer_snippet_found\n\n def get_answer(\n self, query: str, n_results: int = 3, urls: Optional[List[str]] = None, spinner: Optional[Halo] = None\n ) -> Tuple[bool, str]:\n original_spinner_text = None if spinner is None else spinner.text\n qna = []\n\n if urls is None:\n if spinner is not None:\n spinner.start(f'Getting search results for \"{query}\"...')\n\n try:\n search_results = self.search_results_provider.search(query=query, n_results=n_results)\n except (TransientHTTPError, NonTransientHTTPError) as e:\n return False, f'Failed to get search results for \"{query}\" because of an error: {e}'\n\n if spinner is not None:\n spinner.succeed(f'Got search results for \"{query}\".')\n\n if len(search_results.organic_results) == 0 and search_results.answer_snippet is None:\n return False, \"Nothing was found on the web for this query.\"\n\n if search_results.knowledge_graph_description is not None:\n 
qna.append({\"answer\": search_results.knowledge_graph_description, \"source\": \"Knowledge Graph\"})\n\n if search_results.answer_snippet is not None:\n qna.append({\"answer\": search_results.answer_snippet, \"source\": \"Answer Snippet\"})\n\n if not self.skip_results_if_answer_snippet_found or search_results.answer_snippet is None:\n for result in search_results.organic_results:\n if url_unsupported(result.link):\n continue\n\n if spinner is not None:\n spinner.start(f'Reading & analyzing #{result.position} result \"{result.title}\"')\n\n try:\n page_result = self.page_query_analyzer.analyze(\n url=result.link, title=result.title, query=query, spinner=spinner\n )\n answer = page_result.answer\n\n if spinner is not None:\n spinner.succeed(f'Read & analyzed #{result.position} result \"{result.title}\".')\n except Exception as e:\n if type(e) in (RetryError, TransientHTTPError, NonTransientHTTPError):\n if spinner is not None:\n spinner.warn(\n f'Failed to read & analyze #{result.position} result \"{result.title}\", moving on.'\n )\n\n answer = \"Unable to answer query because the page could not be read.\"\n else:\n raise\n\n qna.append({\"answer\": answer, \"source\": result.link})\n else:\n # Urls were provided, search in those urls instead of searching using a search engine\n for url in urls:\n if url_unsupported(url):\n continue\n\n if spinner is not None:\n spinner.start(f'Reading & analyzing URL \"{url}\"')\n\n try:\n page_result = self.page_query_analyzer.analyze(\n url=url, title=\"Unknown\", query=query, spinner=spinner\n )\n answer = page_result.answer\n\n if spinner is not None:\n spinner.succeed(f'Read & analyzed URL \"{url}\".')\n except Exception as e:\n if type(e) in (RetryError, TransientHTTPError, NonTransientHTTPError):\n if spinner is not None:\n spinner.warn(f'Failed to read & analyze URL \"{url}\", moving on.')\n\n answer = \"Unable to answer query because the page could not be read.\"\n else:\n raise\n\n qna.append({\"answer\": answer, \"source\": url})\n\n if spinner is not None:\n spinner.start(f\"Processing results...\")\n\n formatted_answers = \"\\n\".join([f'{i + 1}. {q[\"answer\"]}; Source: {q[\"source\"]}' for i, q in enumerate(qna)])\n\n chat = Chat(\n backing_store=InMemoryChatDataBackingStore(),\n renderer=NoChatRenderer(),\n initial_participants=[\n UserChatParticipant(),\n LangChainBasedAIChatParticipant(\n name=\"Query Answer Aggregator\",\n role=\"Query Answer Aggregator\",\n personal_mission=\"Analyze query answers, discard unlikely ones, and provide an aggregated final response.\",\n chat_model=self.chat_model,\n other_prompt_sections=[\n Section(\n name=\"Aggregating Query Answers\",\n sub_sections=[\n Section(\n name=\"Process\",\n list=[\n \"Receive query and answers with sources.\",\n \"Analyze answers, discard unlikely or minority ones.\",\n \"Formulate final answer based on most likely answers.\",\n 'If no data found, respond \"The answer could not be found.\"',\n ],\n list_item_prefix=None,\n ),\n Section(\n name=\"Aggregation\",\n list=[\n \"Base final answer on sources.\",\n \"Incorporate sources as inline citations in Markdown format.\",\n 'Example: \"Person 1 was [elected president in 2012](https://...).\"',\n \"Only include sources from provided answers.\",\n \"If part of an answer is used, use the same links inline.\",\n ],\n ),\n Section(\n name=\"Final Answer Notes\",\n list=[\n \"Do not fabricate information. 
Stick to provided data.\",\n \"You will be given the top search results from a search engine, there is a reason they are the top results. You should pay attention to all of them and think about the query intent.\"\n \"If the answer is not found in the page data, state it clearly.\",\n \"Should be formatted in Markdown with inline citations.\",\n ],\n ),\n ],\n )\n ],\n ),\n ],\n max_total_messages=2,\n )\n chat_conductor = RoundRobinChatConductor()\n final_answer = chat_conductor.initiate_dialog(\n chat=chat,\n initial_message=str(\n StructuredString(\n sections=[Section(name=\"Query\", text=query), Section(name=\"Answers\", text=formatted_answers)]\n )\n ),\n )\n\n if spinner is not None:\n spinner.succeed(f\"Done searching the web.\")\n\n if original_spinner_text is not None:\n spinner.start(original_spinner_text)\n\n return True, final_answer" }, { "identifier": "OpenAIChatPageQueryAnalyzer", "path": "chatflock/web_research/page_analyzer.py", "snippet": "class OpenAIChatPageQueryAnalyzer(PageQueryAnalyzer):\n def __init__(\n self,\n chat_model: BaseChatModel,\n page_retriever: PageRetriever,\n text_splitter: TextSplitter,\n use_first_split_only: bool = True,\n ):\n self.chat_model = chat_model\n self.page_retriever = page_retriever\n self.text_splitter = text_splitter\n self.use_first_split_only = use_first_split_only\n\n def analyze(self, url: str, title: str, query: str, spinner: Optional[Halo] = None) -> PageQueryAnalysisResult:\n try:\n html = self.page_retriever.retrieve_html(url)\n except (NonTransientHTTPError, TransientHTTPError) as e:\n return PageQueryAnalysisResult(\n answer=f\"The query could not be answered because an error occurred while retrieving the page: {e}\"\n )\n finally:\n self.page_retriever.close()\n\n cleaned_html = clean_html(html)\n\n docs = self.text_splitter.create_documents([cleaned_html])\n\n answer = \"No answer yet.\"\n for i, doc in enumerate(docs):\n text = doc.page_content\n\n query_answerer = LangChainBasedAIChatParticipant(\n name=\"Web Page Query Answerer\",\n role=\"Web Page Query Answerer\",\n personal_mission=\"Answer queries based on provided (partial) web page content from the web.\",\n chat_model=self.chat_model,\n other_prompt_sections=[\n Section(\n name=\"Crafting a Query Answer\",\n sub_sections=[\n Section(\n name=\"Process\",\n list=[\n \"Analyze the query and the given content\",\n \"If context is provided, use it to answer the query.\",\n \"Summarize the answer in a comprehensive, yet succinct way.\",\n ],\n list_item_prefix=None,\n ),\n Section(\n name=\"Guidelines\",\n list=[\n \"If the answer is not found in the page content, it's insufficent, or not relevant \"\n \"to the query at all, state it clearly.\",\n \"Do not fabricate information. Stick to provided content.\",\n \"Provide context for the next call (e.g., if a paragraph was cut short, include \"\n \"relevant header information, section, etc. for continuity). Assume the content is \"\n \"partial content from the page. Be very detailed in the context.\",\n \"If unable to answer but found important information, include it in the context \"\n \"for the next call.\",\n \"Pay attention to the details of the query and make sure the answer is suitable \"\n \"for the intent of the query.\",\n \"A potential answer might have been provided. This means you thought you found \"\n \"the answer in a previous partial text for the same page. 
You should double-check \"\n \"that and provide an alternative revised answer if you think it's wrong, \"\n \"or repeat it if you think it's right or cannot be validated using the current \"\n \"text.\",\n ],\n ),\n ],\n )\n ],\n )\n\n final_answer, _ = get_response(\n query=str(\n StructuredString(\n sections=[\n Section(name=\"Query\", text=query),\n Section(name=\"Url\", text=url),\n Section(name=\"Title\", text=title),\n Section(name=\"Previous Answer\", text=answer),\n Section(name=\"Page Content\", text=f\"```{text}```\"),\n ]\n )\n ),\n answerer=query_answerer,\n )\n\n result = string_output_to_pydantic(\n output=final_answer, chat_model=self.chat_model, output_schema=PageQueryAnalysisResult\n )\n answer = result.answer\n\n if self.use_first_split_only:\n break\n\n return PageQueryAnalysisResult(\n answer=answer,\n )" }, { "identifier": "SeleniumPageRetriever", "path": "chatflock/web_research/page_retrievers/selenium_retriever.py", "snippet": "class SeleniumPageRetriever(PageRetriever):\n def __init__(\n self,\n headless: bool = False,\n main_page_timeout: int = 10,\n main_page_min_wait: int = 2,\n driver_implicit_wait: int = 1,\n driver_page_load_timeout: Optional[int] = None,\n include_iframe_html: bool = False,\n iframe_timeout: int = 10,\n user_agent: Optional[str] = None,\n ):\n if main_page_timeout < main_page_min_wait:\n raise ValueError(\"Main page timeout must be greater than or equal to main_page_min_wait.\")\n\n self.main_page_min_wait = main_page_min_wait\n self.main_page_timeout = main_page_timeout\n self.driver_implicit_wait = driver_implicit_wait\n self.driver_page_load_timeout = driver_page_load_timeout or main_page_timeout\n self.include_iframe_html = include_iframe_html\n self.iframe_timeout = iframe_timeout\n self.user_agent = user_agent\n self.headless = headless\n\n self.service: Optional[Service] = None\n self.driver: Optional[WebDriver] = None\n\n def create_driver_and_service(self) -> Tuple[WebDriver, Service]:\n if self.driver is not None and self.service is not None:\n return self.driver, self.service\n\n chrome_options = Options()\n\n if self.headless:\n chrome_options.add_argument(\"--headless\")\n\n chrome_options.add_argument(\"--no-sandbox\") # Bypass OS security model\n chrome_options.add_argument(\"--disable-gpu\") # Applicable to windows os only\n chrome_options.add_argument(\"start-maximized\") # Open the browser in maximized mode\n chrome_options.add_argument(\"disable-infobars\") # Disabling infobars\n chrome_options.add_argument(\"--disable-extensions\") # Disabling extensions\n chrome_options.add_argument(\"--disable-dev-shm-usage\") # Overcome limited resource problems\n chrome_options.add_argument(\"--ignore-certificate-errors\") # Ignore certificate errors\n chrome_options.add_argument(\"--incognito\") # Incognito mode\n chrome_options.add_argument(\"--log-level=0\") # To disable the logging\n\n if self.user_agent:\n chrome_options.add_argument(f\"user-agent={self.user_agent}\")\n\n # To solve tbsCertificate logging issue\n chrome_options.add_experimental_option(\"excludeSwitches\", [\"enable-logging\"])\n\n # Enable Performance Logging\n chrome_options.set_capability(\"goog:loggingPrefs\", {\"performance\": \"ALL\"})\n chrome_options.set_capability(\"pageLoadStrategy\", \"normal\")\n\n service = Service(ChromeDriverManager().install(), log_output=os.devnull)\n driver = webdriver.Chrome(service=service, options=chrome_options)\n\n return driver, service\n\n def extract_html_from_driver(self, driver: WebDriver) -> str:\n # Wait for minimum 
time first\n time.sleep(self.main_page_min_wait)\n\n try:\n # Wait for the main document to be ready\n WebDriverWait(driver, self.main_page_timeout - self.main_page_min_wait).until(\n lambda d: d.execute_script(\"return document.readyState\") == \"complete\"\n )\n\n iframe_contents = {}\n\n if self.include_iframe_html:\n # Find all iframe elements\n iframes = driver.find_elements(By.TAG_NAME, \"iframe\")\n\n # Iterate over each iframe, switch to it, and capture its HTML\n for iframe in iframes:\n try:\n # Wait for the iframe to be available and for its document to be fully loaded\n WebDriverWait(driver, self.iframe_timeout).until(\n lambda d: EC.frame_to_be_available_and_switch_to_it(iframe)(d) # type: ignore\n and d.execute_script(\"return document.readyState\") == \"complete\"\n )\n\n # Set a temporary ID on the iframe, so we can find it later\n driver.execute_script(\n \"arguments[0].setAttribute('selenium-temp-id', arguments[1])\", iframe, iframe.id\n )\n iframe_id = iframe.get_attribute(\"selenium-temp-id\")\n\n # Capture the iframe HTML\n iframe_html = driver.page_source\n\n iframe_soup = BeautifulSoup(iframe_html, \"html.parser\")\n iframe_body = iframe_soup.find(\"body\")\n\n iframe_contents[iframe_id] = iframe_body\n except (StaleElementReferenceException, NoSuchFrameException, NoSuchElementException):\n # If the iframe is no longer available, skip it\n pass\n finally:\n # Switch back to the main content after each iframe\n driver.switch_to.default_content()\n\n # Capture the main document HTML\n main_html = driver.page_source\n soup = BeautifulSoup(main_html, \"html.parser\")\n\n for frame_id, iframe_body in iframe_contents.items():\n # Insert the iframe body after the iframe element in the main document\n soup_iframe = soup.find(\"iframe\", {\"selenium-temp-id\": frame_id})\n if soup_iframe is None:\n continue\n\n soup_iframe.insert_after(iframe_body)\n\n # The soup object now contains the modified HTML\n full_html = str(soup)\n\n return full_html\n except (WebDriverException, NoSuchFrameException) as e:\n return f\"An error occurred while retrieving the page: {e}\"\n\n @retry(\n retry=retry_if_exception_type(TransientHTTPError),\n wait=wait_fixed(2) + wait_random(0, 2),\n stop=stop_after_attempt(3),\n )\n def retrieve_html(self, url: str, **kwargs: Any) -> str:\n try:\n driver, service = self.create_driver_and_service()\n\n # Implicitly wait for elements to be available and set timeout\n driver.implicitly_wait(self.driver_implicit_wait)\n driver.set_page_load_timeout(self.driver_page_load_timeout)\n\n driver.get(url)\n\n # Wait and extract the HTML\n full_html = self.extract_html_from_driver(driver)\n\n # Now retrieve the logs and check the status code\n logs = driver.get_log(\"performance\")\n status_code = None\n for entry in logs:\n log = json.loads(entry[\"message\"])[\"message\"]\n if log[\"method\"] == \"Network.responseReceived\" and \"response\" in log[\"params\"]:\n status_code = log[\"params\"][\"response\"][\"status\"]\n break\n\n if status_code is None:\n raise Exception(\"No HTTP response received.\")\n elif status_code >= 500:\n raise TransientHTTPError(status_code, \"Server error encountered.\")\n elif 400 <= status_code < 500:\n raise NonTransientHTTPError(status_code, \"Client error encountered.\")\n\n return full_html # or driver.page_source if you wish to return the original source\n except TimeoutException:\n raise TransientHTTPError(408, \"Timeout while waiting for the page to load.\")\n\n def close(self):\n if self.driver:\n self.driver.quit()\n\n if 
self.service:\n self.service.stop()" }, { "identifier": "GoogleSerperSearchResultsProvider", "path": "chatflock/web_research/search.py", "snippet": "class GoogleSerperSearchResultsProvider(SearchResultsProvider):\n def __init__(self, api_key: Optional[str] = None):\n if api_key is None:\n api_key = os.environ[\"SERPER_API_KEY\"]\n\n self.api_key = api_key\n\n @retry(\n retry=retry_if_exception_type(TransientHTTPError),\n wait=wait_fixed(2) + wait_random(0, 2),\n stop=stop_after_attempt(3),\n )\n def search(self, query: str, n_results: int = 3) -> SearchResults:\n if 0 >= n_results > 100:\n raise ValueError(\"n_results must be greater than 0 and less than or equal to 100\")\n\n url = \"https://google.serper.dev/search\"\n\n payload = json.dumps(\n {\n \"q\": query,\n \"num\": n_results + 5, # Request extra results to account for non-organic results\n }\n )\n headers = {\"X-API-KEY\": self.api_key, \"Content-Type\": \"application/json\"}\n\n r = requests.request(\"POST\", url, headers=headers, data=payload)\n if r.status_code >= 300:\n if r.status_code >= 500:\n raise TransientHTTPError(r.status_code, r.text)\n\n raise NonTransientHTTPError(r.status_code, r.text)\n else:\n results = r.json()\n\n return SearchResults(\n answer_snippet=results.get(\"answerBox\", {}).get(\"answer\"),\n knowledge_graph_description=results.get(\"knowledgeGraph\", {}).get(\"description\"),\n organic_results=[\n OrganicSearchResult(\n position=organic_result[\"position\"], title=organic_result[\"title\"], link=organic_result[\"link\"]\n )\n for organic_result in results[\"organic\"][:n_results]\n ],\n )" }, { "identifier": "WebResearchTool", "path": "chatflock/web_research/web_research.py", "snippet": "class WebResearchTool(BaseTool):\n web_search: WebSearch\n n_results: int = 3\n spinner: Optional[Halo] = None\n name: str = \"web_search\"\n description: str = \"Research the web. Use that to get an answer for a query you don't know or unsure of the answer to, for recent events, or if the user asks you to. This will evaluate answer snippets, knowledge graphs, and the top N results from google and aggregate a result.\"\n args_schema: Type[BaseModel] = WebSearchToolArgs\n progress_text: str = \"Searching the web...\"\n\n def _run(\n self,\n query: str,\n urls: Optional[List[str]] = None,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n return self.web_search.get_answer(query=query, n_results=self.n_results, urls=urls, spinner=self.spinner)[1]" }, { "identifier": "create_chat_model", "path": "examples/common.py", "snippet": "def create_chat_model(\n model: str = \"gpt-4-1106-preview\",\n temperature: float = 0.0,\n cache_db_file_path: Optional[str] = \"output/llm_cache.db\",\n) -> BaseChatModel:\n if cache_db_file_path is not None:\n Path(cache_db_file_path).parent.mkdir(parents=True, exist_ok=True)\n\n set_llm_cache(SQLiteCache(database_path=cache_db_file_path))\n\n chat_model = ChatOpenAI(temperature=temperature, model=model)\n\n return chat_model" }, { "identifier": "get_max_context_size", "path": "examples/common.py", "snippet": "def get_max_context_size(chat_model: BaseChatModel) -> Optional[int]:\n try:\n max_context_size = OpenAI.modelname_to_contextsize(chat_model.model_name) # type: ignore\n except ValueError:\n return None\n\n return max_context_size" } ]
import typer from dotenv import load_dotenv from halo import Halo from langchain.text_splitter import TokenTextSplitter from chatflock.backing_stores import InMemoryChatDataBackingStore from chatflock.base import Chat from chatflock.code import LocalCodeExecutor from chatflock.code.langchain import CodeExecutionTool from chatflock.conductors.round_robin import RoundRobinChatConductor from chatflock.participants.langchain import LangChainBasedAIChatParticipant from chatflock.participants.user import UserChatParticipant from chatflock.renderers.terminal import TerminalChatRenderer from chatflock.web_research import WebSearch from chatflock.web_research.page_analyzer import OpenAIChatPageQueryAnalyzer from chatflock.web_research.page_retrievers.selenium_retriever import SeleniumPageRetriever from chatflock.web_research.search import GoogleSerperSearchResultsProvider from chatflock.web_research.web_research import WebResearchTool from examples.common import create_chat_model, get_max_context_size
11,146
def chatgpt_clone_with_additional_tools( model: str = "gpt-4-1106-preview", model_for_page_analysis: str = "gpt-3.5-turbo-1106", temperature: float = 0.0, temperature_for_page_analysis: float = 0.0, ) -> None: chat_model = create_chat_model(model=model, temperature=temperature) chat_model_for_page_analysis = create_chat_model( model=model_for_page_analysis, temperature=temperature_for_page_analysis ) max_context_size_for_page_analysis = get_max_context_size(chat_model_for_page_analysis) or 12_000
def chatgpt_clone_with_additional_tools( model: str = "gpt-4-1106-preview", model_for_page_analysis: str = "gpt-3.5-turbo-1106", temperature: float = 0.0, temperature_for_page_analysis: float = 0.0, ) -> None: chat_model = create_chat_model(model=model, temperature=temperature) chat_model_for_page_analysis = create_chat_model( model=model_for_page_analysis, temperature=temperature_for_page_analysis ) max_context_size_for_page_analysis = get_max_context_size(chat_model_for_page_analysis) or 12_000
page_retriever = SeleniumPageRetriever()
10
2023-11-12 11:10:58+00:00
16k
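A minimal sketch of how the pieces imported in the record above fit together, using only names whose signatures actually appear in the record's snippets (create_chat_model, get_max_context_size, SeleniumPageRetriever, GoogleSerperSearchResultsProvider); the WebSearch/WebResearchTool wiring is deliberately omitted because their constructors are not shown in the record, and anything else here is an assumption rather than dataset content.

    # Hedged sketch, not part of the dataset record.
    from chatflock.web_research.page_retrievers.selenium_retriever import SeleniumPageRetriever
    from chatflock.web_research.search import GoogleSerperSearchResultsProvider
    from examples.common import create_chat_model, get_max_context_size

    chat_model = create_chat_model(model="gpt-4-1106-preview", temperature=0.0)
    max_ctx = get_max_context_size(chat_model) or 12_000   # fallback mirrors the record's cropped_code
    page_retriever = SeleniumPageRetriever()                # the record's gold next_line
    search_provider = GoogleSerperSearchResultsProvider()   # falls back to the SERPER_API_KEY env var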
atlantic-quantum/Shipyard
shipyard/passes/semantic_analysis/semantic_analyzer.py
[ { "identifier": "ErrorCode", "path": "shipyard/compiler_error.py", "snippet": "class ErrorCode(Enum):\n \"\"\"Class to enumerate error codes of the shipyard\"\"\"\n\n ID_NOT_FOUND = \"Identifier not found\"\n DUPLICATE_ID = \"Duplicate id found\"\n NOT_IN_GLOBAL_SCOPE = \"Not in global scope\"\n INVALID_DEFCAL_ARGUMENT = \"Invalid defcal argument\"\n EXPRESSION_IN_DEFCAL = \"Expression in defcal signature, unhandled\"\n INVALID_GATECALL_ARGUMENT = \"Invalid gatecall argument\"\n UNHANDLED = \"Unhandled case\"\n UNDETERMINED_CALL = \"Unable to determine a unique function for function call\"\n NO_SEQC_STATEMENT = \"No equivalent SEQC statement\"\n COMPILE_OUT = \"Statement should be compiled out before printing SEQC code\"\n PORT_NOT_FOUND = \"Port was not found within setup\"\n INSTRUMENT_NOT_FOUND = \"Instrument was not found within setup\"\n INPUT_NOT_FOUND = \"Input value was not found\"\n OUTPUT_NOT_SUPPORTED = \"Output type not supported\"\n INPUT_TYPE_NOT_SUPPORTED = \"Input type not supported\"\n INVALID_ARGUMENT = \"Invalid argument\"\n INVALID_WAVEFORM = \"Waveform does not meet timing constraints\"\n INCLUDE_ERROR = \"Error in include statement\"" }, { "identifier": "SemanticError", "path": "shipyard/compiler_error.py", "snippet": "class SemanticError(Error):\n \"\"\"Error class for semantic errors, raised by SemanticAnalyser\"\"\"" }, { "identifier": "LOGGER", "path": "shipyard/logger.py", "snippet": "LOGGER = logging.getLogger(\"Compiler\")" }, { "identifier": "Mangler", "path": "shipyard/mangle.py", "snippet": "class Mangler(LiteralVisitor, TypeVisitor, GenericVisitor):\n \"\"\"\n QASMVisitor that visits CalibrationDefinition or QuantumGate nodes to gather\n the iformation required to mangle function definition signatures and function calls\n \"\"\"\n\n def __init__(\n self, node: ast.CalibrationDefinition | ast.QuantumGate = None\n ) -> None:\n super().__init__()\n self.name = None\n self.qubits = None\n self.arguments = None\n self.return_type = None\n if node:\n self.visit(node)\n\n def signature(self) -> FunctionSignature:\n \"\"\"Converts instances of Mangler class to FunctionSignature objects\n\n Returns:\n FunctionSignature:\n with name, params qubits and return_type from the Mangler class instance\n \"\"\"\n return FunctionSignature(\n name=self.name,\n params=self.arguments,\n qubits=self.qubits,\n return_type=self.return_type,\n )\n\n # pylint: disable=C0103\n # disable snake_case naming style\n # these functions are of the form \"visit_{QASMNode class name}\"\n def visit_CalibrationDefinition(self, node: ast.CalibrationDefinition):\n \"\"\"\n CalibrationDefinition node visitor\n Extracts name, arguments, qubits and return_type from the node\n and makes them usable for mangling\n\n Args:\n node (ast.CalibrationDefinition):\n openQASM defcal statement to visit\n \"\"\"\n self.name = self.visit(node.name)\n self.arguments = [self.visit(arg) for arg in node.arguments]\n self.qubits = [self.visit(qubit) for qubit in node.qubits]\n self.return_type = self.visit(node.return_type) if node.return_type else \"\"\n\n def visit_QuantumGate(self, node: ast.QuantumGate):\n \"\"\"\n QuantumGate node visitor\n Extracts name, arguments, qubits and return_type from the node\n and makes them usable for mangling\n\n Args:\n node (ast.QuantumGate):\n openQASM quantum gate call node to visit\n \"\"\"\n self.name = self.visit(node.name)\n self.arguments = [self.visit(arg) for arg in node.arguments]\n self.qubits = [self.visit(qubit) for qubit in node.qubits]\n self.return_type = \"\"\n\n 
def visit_QuantumReset(self, node: ast.QuantumReset):\n \"\"\"\n QuantumReset node visitor\n Extracts qubits from the node.\n To be usable for mangling the following operations are performed\n name is set to \"reset\"\n arguments are set to empty ([])\n return_type is set to empty string (\"\")\n\n Args:\n node (ast.QuantumReset):\n openQASM quantum reset node to visit\n \"\"\"\n match node:\n case ast.QuantumReset(ast.Identifier(q)):\n self.name = \"reset\"\n self.arguments = []\n self.qubits = [q]\n self.return_type = \"\"\n case ast.QuantumReset(ast.IndexedIdentifier()):\n raise NotImplementedError\n case _:\n raise NotImplementedError # this should not happen on correct trees\n\n def visit_QuantumMeasurement(self, node: ast.QuantumMeasurement):\n \"\"\"\n QuantumMeasurement node visitor\n Extracts qubits from the node.\n To be usable for mangling the following operations are performed\n name is set to \"measure\"\n arguments are set to empty ([])\n return_type is set \"BIT\"\n\n Args:\n node (ast.QuantumMeasurement):\n openQASM quantum measurement node to visit\n \"\"\"\n match node:\n case ast.QuantumMeasurement(ast.Identifier(q)):\n self.name = \"measure\"\n self.arguments = []\n self.qubits = [q]\n self.return_type = \"BIT\"\n case ast.QuantumMeasurement(ast.IndexedIdentifier()):\n raise NotImplementedError\n case _:\n raise NotImplementedError # this should not happen on correct trees\n\n def visit_Identifier(self, node: ast.Identifier) -> str:\n \"\"\"\n Identifier node visitor\n\n Args:\n node (ast.Identifier):\n openQASM identifier node to visit\n\n Returns:\n str: the name of the identifier\n \"\"\"\n return node.name\n\n def visit_ClassicalArgument(self, node: ast.ClassicalArgument) -> str:\n \"\"\"\n ClassicalArgument node visitor\n\n Args:\n node (ast.ClassicalArgument):\n openQASM classical argument node to visit\n\n Returns:\n str: the type of the classical argument\n \"\"\"\n return self.visit(node.type)\n\n # pylint: enable=C0103" }, { "identifier": "ScopeContext", "path": "shipyard/utilities.py", "snippet": "class ScopeContext(Enum):\n \"\"\"\n Class for keeping track of the current scope of a openQASM program\n\n detailed discussion can be found at:\n https://openqasm.com/language/scope.html\n\n With additional discussion regarding the scope of calibration definitions at:\n https://openqasm.com/language/pulses.html#inline-calibration-blocks\n \"\"\"\n\n GLOBAL = \"GLOBAL\"\n LOCAL = \"LOCAL\"\n SUBROUTINE = \"SUBROUTINE\"\n DEFCAL = \"DEFCAL\"" }, { "identifier": "GenericVisitor", "path": "shipyard/visitors/generic_visitor.py", "snippet": "class GenericVisitor(QASMVisitor):\n def _visit_list(\n self, nodes: list[ast.QASMNode], visit_function: callable, context=None\n ):\n [visit_function(node) for node in nodes]\n\n def visit_Program(self, node: ast.Program, context=None):\n \"\"\"\n An entire OpenQASM 3 program represented by a list of top level statements\n \"\"\"\n self._visit_list(node.statements, self.visit)\n\n def visit_Annotation(self, node: ast.Annotation, context=None):\n \"\"\"An annotation applied to a statment.\"\"\"\n\n def visit_Statement(self, node: ast.Statement, context=None):\n \"\"\"A statement: anything that can appear on its own line\"\"\"\n self._visit_list(node.annotations, self.visit)\n\n def visit_Include(\n self, node: ast.Include, context=None\n ) -> ast.Include | list[ast.Statement]:\n \"\"\"\n An include statement\n \"\"\"\n self.visit_Statement(node)\n\n def visit_ExpressionStatement(self, node: ast.ExpressionStatement, context=None):\n 
\"\"\"A statement that contains a single expression\"\"\"\n self.visit_Statement(node)\n self.visit(node.expression)\n\n # Note that QubitDeclaration is not a valid QuantumStatement, because qubits\n # can only be declared in global scopes, not in gates.\n def visit_QubitDeclaration(self, node: ast.QubitDeclaration, context=None):\n \"\"\"\n Global qubit declaration\n\n Example::\n\n qubit q;\n qubit[4] q;\n\n q // <- qubit\n 4 // <- size\n\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.qubit)\n if node.size:\n self.visit(node.size)\n\n def visit_QuantumGateDefinition(\n self, node: ast.QuantumGateDefinition, context=None\n ):\n \"\"\"\n Define a new quantum gate\n\n Example::\n\n gate cx c, t {\n ctrl @ unitary(pi, 0, pi) c, t;\n }\n\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit_Identifier)\n self._visit_list(node.qubits, self.visit_Identifier)\n self._visit_list(node.body, self.visit)\n\n def visit_QuantumStatement(self, node: ast.QuantumStatement, context=None):\n \"\"\"Statements that may appear inside a gate declaration\"\"\"\n self.visit_Statement(node)\n\n def visit_ExternDeclaration(self, node: ast.ExternDeclaration, context=None):\n \"\"\"\n A extern declaration\n\n Example::\n\n extern get_pauli(int[prec], context=None) -> bit[2 * n];\n\n get_pauli // <- name\n int[prec] // <- classical type\n bit[2 * n] // <- return type\n\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n if node.return_type:\n self.visit(node.return_type)\n\n def visit_Expression(self, node: ast.Expression, context=None):\n \"\"\"An expression: anything that returns a value\"\"\"\n\n def visit_Identifier(self, node: ast.Identifier, context=None):\n \"\"\"\n An identifier\n\n Example::\n\n q1\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_UnaryExpression(self, node: ast.UnaryExpression, context=None):\n \"\"\"\n A unary expression\n\n Example::\n\n ~b\n !bool\n -i\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_BinaryExpression(self, node: ast.BinaryExpression, context=None):\n \"\"\"\n A binary expression\n\n Example::\n\n q1 || q2\n\n \"\"\"\n self.visit_Expression(node)\n self.visit(node.lhs)\n self.visit(node.rhs)\n\n def visit_IntegerLiteral(self, node: ast.IntegerLiteral, context=None):\n \"\"\"\n An integer literal\n\n Example::\n\n 1\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_FloatLiteral(self, node: ast.FloatLiteral, context=None):\n \"\"\"\n An real number literal\n\n Example::\n\n 1.1\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_ImaginaryLiteral(self, node: ast.ImaginaryLiteral, context=None):\n \"\"\"\n An real number literal\n\n Example::\n\n 1.1im\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_BooleanLiteral(self, node: ast.BooleanLiteral, context=None):\n \"\"\"\n A boolean expression\n\n Example::\n\n true\n false\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_BitstringLiteral(self, node: ast.BitstringLiteral, context=None):\n \"\"\"A literal bitstring value. 
The ``value`` is the numerical value of the\n bitstring, and the ``width`` is the number of digits given.\"\"\"\n self.visit_Expression(node)\n\n def visit_DurationLiteral(self, node: ast.DurationLiteral, context=None):\n \"\"\"\n A duration literal\n\n Example::\n\n 1.0ns\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_ArrayLiteral(self, node: ast.ArrayLiteral, context=None):\n \"\"\"Array literal, used to initialise declared arrays.\n\n For example::\n\n array[uint[8], 2] row{1, 2};\n array[uint[8], 2, 2] my_array{{1, 2}, {3, 4}};\n array[uint[8], 2, 2] my_array{row, row};\n \"\"\"\n self.visit_Expression(node)\n self._visit_list(node.values, self.visit)\n\n def visit_FunctionCall(self, node: ast.FunctionCall, context=None):\n \"\"\"\n A function call expression\n\n Example::\n\n foo(1)\n\n foo // <- name\n\n \"\"\"\n self.visit_Expression(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n\n def visit_Cast(self, node: ast.Cast, context=None):\n \"\"\"\n A cast call expression\n\n Example::\n\n counts += int[1](b);\n\n \"\"\"\n self.visit_Expression(node)\n self.visit(node.type)\n self.visit(node.argument)\n\n def visit_DiscreteSet(self, node: ast.DiscreteSet, context=None):\n \"\"\"\n A set of discrete values. This can be used for the values in a ``for``\n loop, or to index certain values out of a register::\n\n for i in {1, 2, 3} {}\n let aliasqubits[{2, 3, 4}];\n \"\"\"\n self._visit_list(node.values, self.visit)\n\n def visit_RangeDefinition(self, node: ast.RangeDefinition, context=None):\n \"\"\"\n Range definition.\n\n Example::\n\n 1:2\n 1:1:10\n :\n \"\"\"\n if node.start:\n self.visit(node.start)\n if node.end:\n self.visit(node.end)\n if node.step:\n self.visit(node.step)\n\n IndexElement = ast.DiscreteSet | list[ast.Expression | ast.RangeDefinition]\n\n def _visit_IndexElement(self, node: IndexElement, context=None):\n if isinstance(node, list):\n return self._visit_list(node, self.visit)\n return self.visit(node)\n\n def visit_IndexExpression(self, node: ast.IndexExpression, context=None):\n \"\"\"\n An index expression.\n\n Example::\n\n q[1]\n \"\"\"\n self.visit_Expression(node)\n self.visit(node.collection)\n self._visit_IndexElement(node.index)\n\n def visit_IndexedIdentifier(self, node: ast.IndexedIdentifier, context=None):\n \"\"\"An indentifier with index operators, such that it can be used as an\n lvalue. The list of indices is subsequent index brackets, so in::\n\n a[{1, 2, 3}][0:1, 0:1]\n\n the list of indices will have two elements. 
The first will be a\n :class:`.DiscreteSet`, and the second will be a list of two\n :class:`.RangeDefinition`\\\\ s.\n \"\"\"\n self.visit_Identifier(node.name)\n self._visit_list(node.indices, self._visit_IndexElement)\n\n def visit_Concatenation(self, node: ast.Concatenation, context=None):\n \"\"\"\n Concatenation of two registers, for example::\n\n a ++ b\n a[2:3] ++ a[0:1]\n \"\"\"\n self.visit_Expression(node)\n self.visit(node.lhs)\n self.visit(node.rhs)\n\n def visit_QuantumGate(self, node: ast.QuantumGate, context=None):\n \"\"\"\n Invoking a quantum gate\n\n Example::\n cx[dur] 0, 1;\n\n or\n\n ctrl @ p(λ) a, b;\n\n ctrl @ // <- quantumGateModifier\n p // <- quantumGateName\n λ // <- argument\n a, b // <- qubit\n \"\"\"\n self.visit_QuantumStatement(node)\n self._visit_list(node.modifiers, self.visit_QuantumGateModifier)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n self._visit_list(node.qubits, self.visit)\n if node.duration:\n self.visit(node.duration)\n\n def visit_QuantumGateModifier(self, node: ast.QuantumGateModifier, context=None):\n \"\"\"\n A quantum gate modifier\n\n Attributes:\n modifier: 'inv', 'pow', or 'ctrl'\n expression: only pow modifier has expression.\n\n Example::\n\n inv @\n pow(1/2)\n ctrl\n \"\"\"\n if node.argument:\n self.visit(node.argument)\n\n def visit_QuantumPhase(self, node: ast.QuantumPhase, context=None):\n \"\"\"\n A quantum phase instruction\n\n Example::\n\n ctrl @ gphase(λ) a;\n\n ctrl @ // <- quantumGateModifier\n λ // <- argument\n a // <- qubit\n\n \"\"\"\n self.visit_QuantumStatement(node)\n self._visit_list(node.modifiers, self.visit_QuantumGateModifier)\n self.visit(node.argument)\n self._visit_list(node.qubits, self.visit)\n\n # Not a full expression because it can only be used in limited contexts.\n def visit_QuantumMeasurement(self, node: ast.QuantumMeasurement, context=None):\n \"\"\"\n A quantum measurement instruction\n\n Example::\n\n measure q;\n \"\"\"\n self.visit(node.qubit)\n\n # Note that this is not a QuantumStatement because it involves access to\n # classical bits.\n def visit_QuantumMeasurementStatement(\n self, node: ast.QuantumMeasurementStatement, context=None\n ):\n \"\"\"Stand-alone statement of a quantum measurement, potentially assigning the\n result to a classical variable. 
This is not the only statement that\n `measure` can appear in (it can also be in classical declaration statements\n and returns).\"\"\"\n self.visit_Statement(node)\n self.visit_QuantumMeasurement(node.measure)\n if node.target:\n self.visit(node.target)\n\n def visit_QuantumBarrier(self, node: ast.QuantumBarrier, context=None):\n \"\"\"\n A quantum barrier instruction\n\n Example::\n\n barrier q;\n \"\"\"\n self.visit_QuantumStatement(node)\n self._visit_list(node.qubits, self.visit)\n\n def visit_QuantumReset(self, node: ast.QuantumReset, context=None):\n \"\"\"\n A reset instruction.\n\n Example::\n\n reset q;\n \"\"\"\n\n self.visit_QuantumStatement(node)\n self.visit(node.qubits)\n\n def visit_ClassicalArgument(self, node: ast.ClassicalArgument, context=None):\n \"\"\"\n Classical argument for a gate or subroutine declaration\n \"\"\"\n self.visit(node.type)\n self.visit_Identifier(node.name)\n\n def visit_ExternArgument(self, node: ast.ExternArgument, context=None):\n \"\"\"Classical argument for an extern declaration.\"\"\"\n\n self.visit(node.type)\n\n def visit_ClassicalDeclaration(self, node: ast.ClassicalDeclaration, context=None):\n \"\"\"\n Classical variable declaration\n\n Example::\n\n bit c;\n \"\"\"\n\n self.visit_Statement(node)\n self.visit(node.type)\n self.visit_Identifier(node.identifier)\n if node.init_expression:\n self.visit(node.init_expression)\n\n def visit_IODeclaration(self, node: ast.IODeclaration, context=None):\n \"\"\"\n Input/output variable declaration\n\n Exampe::\n\n input angle[16] theta;\n output bit select;\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.type)\n self.visit_Identifier(node.identifier)\n\n def visit_ConstantDeclaration(self, node: ast.ConstantDeclaration, context=None):\n \"\"\"\n Constant declaration\n\n Example::\n\n const int[16] n10;\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.type)\n self.visit_Identifier(node.identifier)\n self.visit(node.init_expression)\n\n def visit_ClassicalType(self, node: ast.ClassicalType, context=None):\n \"\"\"\n Base class for classical type\n \"\"\"\n\n def visit_IntType(self, node: ast.IntType, context=None):\n \"\"\"\n Node representing a classical ``int`` (signed integer) type, with an\n optional precision.\n\n Example:\n\n int[8]\n int[16]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_UintType(self, node: ast.UintType, context=None):\n \"\"\"\n Node representing a classical ``uint`` (unsigned integer) type, with an\n optional precision.\n\n Example:\n\n uint[8]\n uint[16]\n \"\"\"\n\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_FloatType(self, node: ast.FloatType, context=None):\n \"\"\"\n Node representing the classical ``float`` type, with the particular IEEE-754\n floating-point size optionally specified.\n\n Example:\n\n float[16]\n float[64]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_ComplexType(self, node: ast.ComplexType, context=None):\n \"\"\"\n Complex ClassicalType. 
Its real and imaginary parts are based on other\n classical types.\n\n Example::\n\n complex[float]\n complex[float[32]]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.base_type:\n self.visit(node.base_type)\n\n def visit_AngleType(self, node: ast.AngleType, context=None):\n \"\"\"\n Node representing the classical ``angle`` type, with an optional precision.\n\n Example::\n\n angle[8]\n angle[16]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_BitType(self, node: ast.BitType, context=None):\n \"\"\"\n Node representing the classical ``bit`` type, with an optional size.\n\n Example::\n\n bit[8]\n creg[8]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_BoolType(self, node: ast.BoolType, context=None):\n \"\"\"\n Leaf node representing the Boolean classical type.\n \"\"\"\n self.visit_ClassicalType(node)\n\n def visit_ArrayType(self, node: ast.ArrayType, context=None):\n \"\"\"Type of arrays that include allocation of the storage.\n\n This is generally any array declared as a standard statement, but not\n arrays declared by being arguments to subroutines.\n \"\"\"\n self.visit_ClassicalType(node)\n self.visit(node.base_type)\n self._visit_list(node.dimensions, self.visit)\n\n def visit_ArrayReferenceType(self, node: ast.ArrayReferenceType, context=None):\n \"\"\"Type of arrays that are a reference to an array with allocated storage.\n\n This is generally any array declared as a subroutine argument. The\n dimensions can be either a list of expressions (one for each dimension), or\n a single expression, which is the number of dimensions.\n\n For example::\n\n // `a` will have dimensions `[IntegerLiteral(2)]` (with a list), because\n // it is a 1D array, with a length of 2.\n def f(const array[uint[8], 2] a) {}\n // `b` will have dimension `IntegerLiteral(3)` (no list), because it is\n // a 3D array, but we don't know the lengths of its dimensions.\n def f(const array[uint[8], #dim=3] b) {}\n \"\"\"\n\n self.visit_ClassicalType(node)\n self.visit(node.base_type)\n if isinstance(node.dimensions, list):\n self._visit_list(node.dimensions, self.visit)\n else:\n self.visit(node.dimensions)\n\n def visit_DurationType(self, node: ast.DurationType, context=None):\n \"\"\"\n Leaf node representing the ``duration`` type.\n \"\"\"\n self.visit_ClassicalType(node)\n\n def visit_StretchType(self, node: ast.StretchType, context=None) -> ast.StretchType:\n \"\"\"\n Leaf node representing the ``stretch`` type.\n \"\"\"\n self.visit_ClassicalType(node)\n\n def visit_CalibrationGrammarDeclaration(\n self, node: ast.CalibrationGrammarDeclaration, context=None\n ):\n \"\"\"\n Calibration grammar declaration\n\n Example::\n\n defcalgrammar \"openpulse\";\n \"\"\"\n\n def visit_CalibrationStatement(self, node: ast.CalibrationStatement, context=None):\n \"\"\"An inline ``cal`` statement for embedded pulse-grammar interactions.\n\n Example::\n\n cal {\n shift_phase(drive($0), theta);\n }\n \"\"\"\n self.visit_Statement(node)\n self._visit_list(node.body, self.visit)\n\n def visit_CalibrationBlock(self, node: ast.CalibrationBlock, context=None):\n self._visit_list(node.body, self.visit)\n\n def visit_CalibrationDefinition(\n self, node: ast.CalibrationDefinition, context=None\n ):\n \"\"\"\n Calibration definition\n\n Example::\n\n defcal rz(angle[20] theta) q {\n shift_phase drive(q), -theta;\n }\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n 
self._visit_list(node.qubits, self.visit_Identifier)\n self._visit_list(node.body, self.visit)\n if node.return_type:\n self.visit(node.return_type)\n\n def visit_SubroutineDefinition(self, node: ast.SubroutineDefinition, context=None):\n \"\"\"\n Subroutine definition\n\n Example::\n\n def measure(qubit q, context=None) -> bit {\n s q;\n h q;\n return measure q;\n }\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n self._visit_list(node.body, self.visit)\n if node.return_type:\n self.visit(node.return_type)\n\n def visit_QuantumArgument(self, node: ast.QuantumArgument, context=None):\n \"\"\"\n Quantum argument for a subroutine declaration\n \"\"\"\n self.visit_Identifier(node.name)\n if node.size:\n self.visit(node.size)\n\n def visit_ReturnStatement(self, node: ast.ReturnStatement, context=None):\n \"\"\"\n Classical or quantum return statement\n\n Example::\n\n return measure q;\n\n return a + b\n\n \"\"\"\n self.visit_Statement(node)\n if node.expression:\n self.visit(node.expression)\n\n def visit_BreakStatement(self, node: ast.BreakStatement, context=None):\n \"\"\"\n Break statement\n\n Example::\n\n break;\n \"\"\"\n self.visit_Statement(node)\n\n def visit_ContinueStatement(self, node: ast.ContinueStatement, context=None):\n \"\"\"\n Continue statement\n\n Example::\n\n continue;\n \"\"\"\n self.visit_Statement(node)\n\n def visit_EndStatement(self, node: ast.EndStatement, context=None):\n \"\"\"\n End statement\n\n Example::\n\n end;\n \"\"\"\n self.visit_Statement(node)\n\n def visit_BranchingStatement(self, node: ast.BranchingStatement, context=None):\n \"\"\"\n Branch (``if``) statement\n\n Example::\n\n if (temp == 1) {\n ry(-pi / 2) scratch[0];\n } else continue;\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.condition)\n self._visit_list(node.if_block, self.visit)\n self._visit_list(node.else_block, self.visit)\n\n def visit_WhileLoop(self, node: ast.WhileLoop, context=None):\n \"\"\"\n While loop\n\n Example::\n\n while(~success) {\n reset magic;\n ry(pi / 4) magic;\n successdistill(magic, scratch);\n }\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.while_condition)\n self._visit_list(node.block, self.visit)\n\n def visit_ForInLoop(self, node: ast.ForInLoop, context=None):\n \"\"\"\n For in loop\n\n Example::\n\n for i in [0: 2] {\n majority a[i], b[i + 1], a[i + 1];\n }\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.type)\n self.visit_Identifier(node.identifier)\n self.visit(node.set_declaration)\n self._visit_list(node.block, self.visit)\n\n def visit_DelayInstruction(self, node: ast.DelayInstruction, context=None):\n \"\"\"\n Delay instruction\n\n Example::\n\n delay[start_stretch] $0;\n \"\"\"\n self.visit_QuantumStatement(node)\n self.visit(node.duration)\n self._visit_list(node.qubits, self.visit)\n\n def visit_Box(self, node: ast.Box, context=None):\n \"\"\"\n Timing box\n\n Example::\n\n box [maxdur] {\n delay[start_stretch] $0;\n x $0;\n }\n \"\"\"\n self.visit_QuantumStatement(node)\n if node.duration:\n self.visit(node.duration)\n self._visit_list(node.body, self.visit)\n\n def visit_DurationOf(self, node: ast.DurationOf, context=None):\n \"\"\"\n Duration Of\n\n Example::\n\n durationof({x $0;})\n \"\"\"\n self.visit_Expression(node)\n self._visit_list(node.target, self.visit)\n\n def visit_SizeOf(self, node: ast.SizeOf, context=None):\n \"\"\"``sizeof`` an array's dimensions.\"\"\"\n self.visit_Expression(node)\n self.visit(node.target)\n if node.index:\n 
self.visit(node.index)\n\n def visit_AliasStatement(self, node: ast.AliasStatement, context=None):\n \"\"\"\n Alias statement\n\n Example::\n\n let aqubits[0];\n\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.target)\n self.visit(node.value)\n\n def visit_ClassicalAssignment(self, node: ast.ClassicalAssignment, context=None):\n \"\"\"\n Classical assignment\n\n Example::\n\n a[0]1;\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.lvalue)\n self.visit(node.rvalue)\n\n def visit_Pragma(self, node: ast.Pragma, context=None):\n \"\"\"\n Pragma\n Example::\n\n #pragma val1 val2 val3\n \"\"\"\n\n def visit_WaveformType(self, node: ast.WaveformType, context=None):\n self.visit_ClassicalType(node)\n\n def visit_PortType(self, node: ast.PortType, context=None):\n self.visit_ClassicalType(node)\n\n def visit_FrameType(self, node: ast.FrameType, context=None):\n self.visit_ClassicalType(node)" }, { "identifier": "LiteralVisitor", "path": "shipyard/visitors/literal_visitor.py", "snippet": "class LiteralVisitor:\n \"\"\"Class defining methods for visiting openQASM literal-nodes\"\"\"\n\n def visit_BitstringLiteral(self, node: ast.BitstringLiteral) -> str:\n \"\"\"\n BitstringLiteral node visitor:\n\n Args:\n node (ast.BitstringLiteral):\n openQASM bitstring literal node to visit\n\n Returns:\n str: string representation of the node value\n \"\"\"\n value = bin(node.value)[2:]\n if len(value) < node.width:\n value = \"0\" * (node.width - len(value)) + value\n return f'\"{value}\"'\n\n def visit_IntegerLiteral(self, node: ast.IntegerLiteral) -> str:\n \"\"\"\n IntegerLiteral node visitor:\n\n Args:\n node (ast.IntegerLiteral):\n openQASM integer literal node to visit\n\n Returns:\n str: string representation of the node value\n \"\"\"\n return str(node.value)\n\n def visit_FloatLiteral(self, node: ast.IntegerLiteral) -> str:\n \"\"\"\n FloatLiteral node visitor:\n\n Args:\n node (ast.FloatLiteral):\n openQASM float literal node to visit\n\n Returns:\n str: string representation of the node value\n \"\"\"\n return str(node.value)\n\n def visit_ImaginaryLiteral(self, node: ast.ImaginaryLiteral) -> str:\n \"\"\"\n ImaginaryLiteral node visitor:\n\n Args:\n node (ast.ImaginaryLiteral):\n openQASM imaginary literal node to visit\n\n Returns:\n str: string representation of the node value\n \"\"\"\n return str(node.value) + \"im\"\n\n def visit_BooleanLiteral(self, node: ast.BooleanLiteral) -> str:\n \"\"\"\n BooleanLiteral node visitor:\n\n Args:\n node (ast.BooleanLiteral):\n openQASM boolean literal node to visit\n\n Returns:\n str: string representation of the node value\n \"\"\"\n return \"true\" if node.value else \"false\"\n\n def visit_DurationLiteral(self, node: ast.DurationLiteral) -> str:\n \"\"\"\n DurationLiteral node visitor:\n\n Args:\n node (ast.DurationLiteral):\n openQASM duration literal node to visit\n\n Returns:\n str: string representation of the node value\n \"\"\"\n return f\"{node.value}{node.unit.name}\"\n\n # def visit_ArrayLiteral(self, node: ast.ArrayLiteral) -> None:\n # self._visit_sequence(node.values, context, start=\"{\", end=\"}\", separator=\", \")" }, { "identifier": "TypeVisitor", "path": "shipyard/visitors/type_visitor.py", "snippet": "class TypeVisitor:\n \"\"\"Class defining methods for visiting openQASM type-nodes\"\"\"\n\n def _visit_type_node(self, node: ast.ClassicalType) -> str:\n \"\"\"\n type node visitor:\n Returns the name of a Type node\n Example:\n node:ast.FloatType -> 'FLOAT'\n\n Args:\n node (ast.ClassicalType): node that is a 
subclass of ClassicalType\n\n Returns:\n str: name of the node type\n \"\"\"\n return str(node.__class__.__name__).upper().split(\"TYPE\", maxsplit=1)[0]\n\n def _visit_type_node_wrapper(self, node: ast.ClassicalType):\n return self._visit_type_node(node)\n\n visit_IntType = _visit_type_node_wrapper\n visit_UintType = _visit_type_node_wrapper\n visit_FloatType = _visit_type_node_wrapper\n visit_ComplexType = _visit_type_node_wrapper # todo expand to indicate base type\n visit_AngleType = _visit_type_node_wrapper\n visit_BitType = _visit_type_node_wrapper\n visit_BoolType = _visit_type_node_wrapper\n visit_ArrayType = (\n _visit_type_node_wrapper # todo expand to indicate type+size of array\n )\n\n def visit_ArrayReferenceType(self, node: ast.ArrayReferenceType) -> None:\n \"\"\"\n ToDo\n \"\"\"\n raise NotImplementedError\n\n visit_DurationType = _visit_type_node_wrapper\n visit_StretchType = _visit_type_node_wrapper\n\n visit_PortType = _visit_type_node_wrapper\n visit_FrameType = _visit_type_node_wrapper\n visit_WaveformType = _visit_type_node_wrapper" }, { "identifier": "CalScopedSymbolTable", "path": "shipyard/passes/semantic_analysis/scoped_symbol_table.py", "snippet": "class CalScopedSymbolTable(ScopedSymbolTable):\n \"\"\"\n Scoped Symbol Table for openPulse code, used when in 'cal' and 'defcal' blocks\n in openQASM programs and using the openPulse defcalgrammar\n \"\"\"\n\n _builtin_cal_symbol_lists = [BUILTIN_CAL_TYPES, BUILTIN_OPENPULSE, BUILTIN_ZI_WFM]\n\n def __init__(\n self,\n scope_name: str,\n enclosing_scope: \"ScopedSymbolTable\" = None,\n init_cal: bool = False,\n ) -> None:\n super().__init__(scope_name, enclosing_scope)\n if init_cal:\n self._init_cal_builtins()\n\n def _init_cal_builtins(self):\n for symbol_list in self._builtin_cal_symbol_lists:\n for symbol in symbol_list:\n self.insert(symbol)" }, { "identifier": "ScopedSymbolTable", "path": "shipyard/passes/semantic_analysis/scoped_symbol_table.py", "snippet": "class ScopedSymbolTable:\n \"\"\"\n Symbol Table for keeping track of symbols, defined in openQASM programs,\n and their scope.\n\n Used during Semantic Analysis of openQASM programs\n\n The symbol table is a managed dictionary, which should not be interacted with\n directly but rather using the 'insert' and 'lookup' methods of the class.\n\n Todo consider implementing __getitem__, __setitem__, items() and values() methods\n \"\"\"\n\n _builtin_symbol_lists = [BUILTIN_TYPES, BUILTIN_ZI_EXP, BUILTIN_ZI_FUNC]\n\n _builtin_functions = []\n _builtin_gates = [\"measure\"] # todo is this built in?\n\n def __init__(\n self,\n scope_name: str,\n enclosing_scope: \"ScopedSymbolTable\" = None,\n ) -> None:\n self._symbols: dict[str, Symbol] = {}\n self.scope_name = scope_name\n self.enclosing_scope: \"ScopedSymbolTable\" = enclosing_scope\n LOGGER.debug(\"Created scope named: %s\", self.scope_name)\n if enclosing_scope is None:\n self._init_builtins()\n\n def _init_builtins(self):\n for symbol_list in self._builtin_symbol_lists:\n for symbol in symbol_list:\n self.insert(symbol)\n\n def __str__(self) -> str:\n header1 = \"SCOPE (SCOPED SYMBOL TABLE)\"\n lines = [\"\\n\", header1, \"=\" * len(header1)]\n for header_name, header_value in (\n (\"Scope name\", self.scope_name),\n (\n \"Enclosing scope\",\n self.enclosing_scope.scope_name if self.enclosing_scope else None,\n ),\n ):\n lines.append(f\"{header_name:<16}: {header_value}\")\n header2 = \"Scope (Scoped symbol table) contents\"\n lines.extend([header2, \"-\" * len(header2)])\n lines.extend(\n f\"{key:>16}: 
{value.__repr__()}\" for key, value in self._symbols.items()\n )\n lines.append(\"\\n\")\n symbol_table_string = \"\\n\".join(lines)\n return symbol_table_string\n\n __repr__ = __str__\n\n def insert(self, symbol: Symbol):\n \"\"\"Inserts a symbol into the symbol table\n\n Args:\n symbol (Symbol): Symbol to insert into the table\n \"\"\"\n LOGGER.debug(\"Insert into %s: %s\", self.scope_name, symbol)\n self._symbols[symbol.name] = symbol\n\n def lookup(self, name: str, current_scope_only: bool = False) -> Symbol:\n \"\"\"looks up a symbol by name in the symbol table\n\n Args:\n name\n (str): the name of the symbol to look up in the symbol table\n current_scope_only (bool, optional):\n If True a symbol is only looked up in the current scope.\n Else, if it is not found within the current symbol table,\n it is looked up in any enclosing scopes\n\n Returns:\n Symbol:\n A Symbol with name matching the name being looked up,\n None:\n If a symbol with the name is not found\n \"\"\"\n LOGGER.debug(\"Lookup: %s. (Scope name: %s\", name, self.scope_name)\n # 'symbol' is either an instance of the Symbol class or None\n symbol = self._symbols.get(name, None)\n\n if symbol is not None:\n return symbol\n\n if current_scope_only:\n return None\n\n # recursively go up the chain and lookup the name\n if self.enclosing_scope is not None:\n return self.enclosing_scope.lookup(name)\n return None\n\n def keys(self, current_scope_only=False) -> list[str]:\n \"\"\"returns the name of all symbols in scope\n\n Args:\n current_scope_only (bool, optional):\n If true only returns the names of symbols in current scope.\n Defaults to False.\n\n Returns:\n list[str]: names of all the symbols in scope\n \"\"\"\n symbol_names = list(self._symbols.keys())\n if current_scope_only:\n return symbol_names\n if self.enclosing_scope is not None:\n symbol_names.extend(\n [\n name\n for name in self.enclosing_scope.keys()\n if name not in symbol_names\n ]\n )\n return symbol_names" }, { "identifier": "AliasSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class AliasSymbol(Symbol):\n \"\"\"A symbol that represents an alias of another symbol\"\"\"" }, { "identifier": "ClassicalSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class ClassicalSymbol(Symbol):\n \"\"\"A symbol that represents a classical variable\n\n the kind of the symbol should be the name of a builtin classical symbol\n (i.e. BuiltinSymbol/BuiltinCalSymbol but not QUBIT)\n \"\"\"\n\n _validate_classical = validator(\"kind\", allow_reuse=True)(\n kind_must_be_name_of_classical_type\n )" }, { "identifier": "ConstantSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class ConstantSymbol(Symbol):\n \"\"\"A symbol that represents a classical compile time constant\n\n the kind of the symbol should be the name of a builtin classical symbol\n (i.e. 
BuiltinSymbol/BuiltinCalSymbol but not QUBIT)\n \"\"\"\n\n _validate_classical = validator(\"kind\", allow_reuse=True)(\n kind_must_be_name_of_classical_type\n )" }, { "identifier": "DefcalSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class DefcalSymbol(GateSymbol):\n \"\"\"A symbol representing a calibration definition of an operation\n\n e.g., the physical pulses used to perfrom a gate operation\n or a measurement on a qubit\n\n for further reading\n https://openqasm.com/language/pulses.html\n \"\"\"" }, { "identifier": "ExternSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class ExternSymbol(SubroutineSymbol):\n \"\"\"A symbol representing external functions or ports,\n\n for further reading\n https://openqasm.com/language/classical.html#extern-function-calls\n \"\"\"" }, { "identifier": "GateSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class GateSymbol(SubroutineSymbol):\n \"\"\"A symbol representing a quantum gate operation\n\n a quantum gate represents the unitary quantum operation\n\n for further reading\n https://openqasm.com/language/gates.html\n \"\"\"\n\n qubits: list[QuantumSymbol] = Field(default_factory=lambda: [])" }, { "identifier": "IOSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class IOSymbol(Symbol):\n \"\"\"A symbol that represents Input/Output of a script,\n i.e. a value that will be provided at runtime or a value that will be returned\n from running the script.\n\n This behaviour is not currently implemented in our pipeline\n\n for further reading\n https://openqasm.com/language/directives.html#input-output\n \"\"\"\n\n _validate_classical = validator(\"kind\", allow_reuse=True)(\n kind_must_be_name_of_classical_type\n )" }, { "identifier": "LiteralSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class LiteralSymbol(ClassicalSymbol):\n \"\"\"A symbol that represents a Literal\"\"\"" }, { "identifier": "QuantumSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class QuantumSymbol(Symbol):\n \"\"\"\n A symbol representing quantum objects, i.e., either a qubit or a qubit register\n \"\"\"\n\n @validator(\"kind\")\n def kind_must_be_name_of_quantum_type(cls, kind: str) -> str:\n \"\"\"if the input string is a name of a quantum type it is returned else a\n validation error is raised\n\n Args:\n kind (str): should be the name of a quantum type\n\n Returns:\n str: input string if it is a name of a quantum type\n \"\"\"\n assert kind in _BUILTIN_QUANTUM_SYMBOL_NAMES\n return kind" }, { "identifier": "SubroutineSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class SubroutineSymbol(Symbol):\n \"\"\"A symbol representing subroutines\n\n for further reading\n https://openqasm.com/language/subroutines.html\n \"\"\"\n\n params: list[Symbol] = Field(default_factory=lambda: [])\n return_type: str = None\n\n @validator(\"return_type\")\n def return_classical_or_none(cls, return_type: str):\n \"\"\"If the return type is a classical type or an array it is returned\n in upper case format, else a ValidationError is raised\n\n Args:\n return_type (str): should be a name of a valid classical type or 'array'\n\n Returns:\n str: uppercase input string if valid classical type or 'ARRAY'\n \"\"\"\n if return_type is not None:\n return_type = return_type.upper()\n assert return_type in _BUILTIN_CLASSICAL_SYMBOL_NAMES + [\"ARRAY\"]\n return return_type" }, { "identifier": "Symbol", "path": 
"shipyard/passes/semantic_analysis/symbols.py", "snippet": "class Symbol(BaseModel):\n \"\"\"Base class for Symbols\"\"\"\n\n name: str\n kind: str = None\n\n @validator(\"kind\")\n def force_kind_uppercase(cls, kind: str) -> str:\n \"\"\"If the string 'kind' is not None make it uppercase\n\n Args:\n kind (str): a string (or None)\n\n Returns:\n str: the same string but uppercase (returns None if 'kind' is None)\n \"\"\"\n if kind is not None:\n return kind.upper()\n return kind" } ]
from contextlib import contextmanager from openpulse import ast from ...compiler_error import ErrorCode, SemanticError from ...logger import LOGGER from ...mangle import Mangler from ...utilities import ScopeContext from ...visitors import GenericVisitor, LiteralVisitor, TypeVisitor from .scoped_symbol_table import CalScopedSymbolTable, ScopedSymbolTable from .symbols import ( AliasSymbol, ClassicalSymbol, ConstantSymbol, DefcalSymbol, ExternSymbol, GateSymbol, IOSymbol, LiteralSymbol, QuantumSymbol, SubroutineSymbol, Symbol, )
12,021
""" Module that host the SemanticAnalyser QASMVisitor class that can be used to perform semantic analysis on openQASM Abstract Syntax Trees. """ # pylint: disable=R0904: # Too many public methods class SemanticAnalyzer(TypeVisitor, LiteralVisitor, GenericVisitor): """ QASMVisitor class that peforms semantic analysis on a openQASM Abstract Syntax Tree usage: qasm_ast = openpulse.parse(qasm_program_string) sa = SemanticAnalyser() sa.visit(qasm_ast) """ def __init__(self) -> None: self.current_scope: ScopedSymbolTable = None self._calibration_scope: CalScopedSymbolTable = None self._scope_context: ScopeContext = None super().__init__() @property def calibration_scope(self) -> CalScopedSymbolTable: """Getter for the 'calibration_scope' symbol table of a SemanticAnalyser instance. Creates and returns an initialised calibration scope on first call. Subsequent calls return the same scope. Returns: CalScopedSymbolTable: a scoped symbol table used for symbols declared within openpulse syntax (cal & defcal) """ if self._calibration_scope is None: self.ensure_in_global_scope(ast.Identifier("init cal scope")) self._calibration_scope = CalScopedSymbolTable( "cal_scope", enclosing_scope=self.current_scope, init_cal=True ) return self._calibration_scope @property def scope_context(self) -> ScopeContext: """Getter for the 'scope_context' property of a SemanticAnalyser instance""" return self._scope_context @scope_context.setter def scope_context(self, value: ScopeContext): LOGGER.debug("SET SCOPE CONTEXT: %s", value) self._scope_context = value # pylint: disable=C0103 # disable snake_case naming style # these functions are of the form "visit_{QASMNode class name}" def visit_Program(self, node: ast.Program) -> None: """ Program node visitor, creates and enters a global symbol table (global scope), visits all other statements in the openQASM program. Args: node (ast.Program): openQASM program ast node to visit """ global_scope = ScopedSymbolTable( scope_name="global", enclosing_scope=self.current_scope, ) with self.scope_context_manager(global_scope, ScopeContext.GLOBAL): for statement in node.statements: self.visit(statement) def visit_ExternDeclaration(self, node: ast.ExternDeclaration) -> None: """ ExternDeclaration node visitor, inserts a symbol representing the external function declaration into current_scope (symbol table) Args: node (ast.ExternDeclaration): openQASM external function declaration ast node to visit """ extern_name = node.name.name params = [ ClassicalSymbol( name=f"{extern_name}_arg_{i}", kind=self.visit(argument.type) ) for i, argument in enumerate(node.arguments) ] return_type = self.visit(node.return_type) if node.return_type else None
""" Module that host the SemanticAnalyser QASMVisitor class that can be used to perform semantic analysis on openQASM Abstract Syntax Trees. """ # pylint: disable=R0904: # Too many public methods class SemanticAnalyzer(TypeVisitor, LiteralVisitor, GenericVisitor): """ QASMVisitor class that peforms semantic analysis on a openQASM Abstract Syntax Tree usage: qasm_ast = openpulse.parse(qasm_program_string) sa = SemanticAnalyser() sa.visit(qasm_ast) """ def __init__(self) -> None: self.current_scope: ScopedSymbolTable = None self._calibration_scope: CalScopedSymbolTable = None self._scope_context: ScopeContext = None super().__init__() @property def calibration_scope(self) -> CalScopedSymbolTable: """Getter for the 'calibration_scope' symbol table of a SemanticAnalyser instance. Creates and returns an initialised calibration scope on first call. Subsequent calls return the same scope. Returns: CalScopedSymbolTable: a scoped symbol table used for symbols declared within openpulse syntax (cal & defcal) """ if self._calibration_scope is None: self.ensure_in_global_scope(ast.Identifier("init cal scope")) self._calibration_scope = CalScopedSymbolTable( "cal_scope", enclosing_scope=self.current_scope, init_cal=True ) return self._calibration_scope @property def scope_context(self) -> ScopeContext: """Getter for the 'scope_context' property of a SemanticAnalyser instance""" return self._scope_context @scope_context.setter def scope_context(self, value: ScopeContext): LOGGER.debug("SET SCOPE CONTEXT: %s", value) self._scope_context = value # pylint: disable=C0103 # disable snake_case naming style # these functions are of the form "visit_{QASMNode class name}" def visit_Program(self, node: ast.Program) -> None: """ Program node visitor, creates and enters a global symbol table (global scope), visits all other statements in the openQASM program. Args: node (ast.Program): openQASM program ast node to visit """ global_scope = ScopedSymbolTable( scope_name="global", enclosing_scope=self.current_scope, ) with self.scope_context_manager(global_scope, ScopeContext.GLOBAL): for statement in node.statements: self.visit(statement) def visit_ExternDeclaration(self, node: ast.ExternDeclaration) -> None: """ ExternDeclaration node visitor, inserts a symbol representing the external function declaration into current_scope (symbol table) Args: node (ast.ExternDeclaration): openQASM external function declaration ast node to visit """ extern_name = node.name.name params = [ ClassicalSymbol( name=f"{extern_name}_arg_{i}", kind=self.visit(argument.type) ) for i, argument in enumerate(node.arguments) ] return_type = self.visit(node.return_type) if node.return_type else None
extern_symbol = ExternSymbol(
14
2023-11-16 17:37:29+00:00
16k
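The SemanticAnalyzer snippet in the record above documents its own usage (parse an openQASM program with openpulse, then visit the resulting AST); the sketch below simply restates that usage as code. The module path follows the record's file_path, the toy program string is hypothetical, and it assumes openpulse exposes a parse() function exactly as the class docstring claims.

    # Hedged sketch based on the usage shown in the SemanticAnalyzer docstring above.
    import openpulse
    from shipyard.passes.semantic_analysis.semantic_analyzer import SemanticAnalyzer

    qasm_ast = openpulse.parse("OPENQASM 3.0; qubit q;")  # hypothetical toy program
    analyzer = SemanticAnalyzer()
    analyzer.visit(qasm_ast)  # raises SemanticError (see ErrorCode) on semantic violations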
quantuminterface/qiclib
src/qiclib/code/qi_sequencer.py
[ { "identifier": "QiCellProperty", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiCellProperty(QiExpression):\n \"\"\"When describing experiments, properties of cells might not yet be defined. Instead a QiCellProperty object will be generated.\n This object can be used as length definition in cQiWait commands and QiPulse\"\"\"\n\n def __init__(self, cell, name):\n super().__init__()\n from .qi_jobs import QiCell\n\n self.name: str = name\n self.cell: QiCell = cell\n self.operations = lambda val: val\n self.opcode = \"x\"\n\n @property\n def opcode_p(self):\n \"\"\"Old opcode in parantheses for building new opcode\"\"\"\n return self.opcode if self.opcode == \"x\" else f\"({self.opcode})\"\n\n def resolve_equal(self, o: object) -> bool:\n if isinstance(o, QiCellProperty):\n return self.name == o.name and self.opcode == o.opcode\n elif o is None:\n return False\n try:\n return o == self()\n except KeyError:\n return False # At time of comparison, unresolved property is not equal to o\n\n def __call__(self):\n value = self.cell._properties.get(self.name)\n\n if isinstance(value, QiCellProperty) or value is None:\n raise KeyError(\"Property could not be resolved\")\n return self.operations(value)\n\n @property\n def value(self):\n if self.type == QiType.TIME:\n return util.conv_time_to_cycles(self())\n elif self.type == QiType.FREQUENCY:\n return util.conv_freq_to_nco_phase_inc(self())\n elif self.type == QiType.NORMAL:\n return self()\n elif self.type == QiType.STATE:\n return self()\n else:\n raise RuntimeError(\n \"Mising type information to resolve value to convert to a machine value.\"\n )\n\n @property\n def float_value(self):\n assert self.type in (QiType.TIME, QiType.FREQUENCY)\n return self()\n\n @abstractmethod\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_cell_property(self)\n\n @property\n def contained_variables(self):\n return QiVariableSet()\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return isinstance(other, QiCellProperty) and self.resolve_equal(other)\n\n def move_add_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations # Necessary because of recursion otherwise\n self.operations = lambda val: old_op(val) + x.value\n self.opcode = f\"{self.opcode_p} + {x}\"\n return self\n\n def move_radd_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations\n self.operations = lambda val: x.value + old_op(val)\n self.opcode = f\"{self.opcode_p} + {x}\"\n return self\n\n def move_sub_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) - x.value\n self.opcode = f\"{self.opcode_p} - {x}\"\n return self\n\n def move_rsub_op_to_property(self, x: _QiConstValue):\n old_op = self.operations\n self.operations = lambda val: x.value - old_op(val)\n self.opcode = f\"{x} - {self.opcode_p}\"\n return self\n\n def move_mul_op_to_property(self, x: _QiConstValue):\n if x._given_value == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) * x.value\n self.opcode = f\"{x} * {self.opcode_p}\"\n return self\n\n def move_rmul_op_to_property(self, x: _QiConstValue):\n if x._given_value == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: x.value * old_op(val)\n self.opcode = f\"{x} * {self.opcode_p}\"\n return self\n\n # These operations are not implemented for general QiExpressions\n # 
and are, therefore, left as they are.\n\n def __truediv__(self, x):\n if (isinstance(x, _QiConstValue) and x._given_value == 1) or x == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) / x\n self.opcode = f\"{self.opcode_p} / {x}\"\n return self\n\n def __rtruediv__(self, x):\n old_op = self.operations\n self.operations = lambda val: x / old_op(val)\n self.opcode = f\"{x} / {self.opcode_p}\"\n return self" }, { "identifier": "QiVariableSet", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiVariableSet:\n \"\"\"Class provides Set functionality for QiVariables.\n QiVariables overwrite comparison operations to build operation trees, to still allow comparisons ids are used.\n \"\"\"\n\n def __init__(self) -> None:\n self._var_list: List[\"_QiVariableBase\"] = []\n self._var_id_list: List[int] = []\n\n def __contains__(self, x):\n return x.id in self._var_id_list\n\n def add(self, x: \"_QiVariableBase\"):\n if x.id not in self._var_id_list:\n self._var_id_list.append(x.id)\n self._var_list.append(x)\n\n def update(self, var_set):\n for var in var_set:\n self.add(var)\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self._var_list):\n var = self._var_list[self.n]\n self.n += 1\n return var\n else:\n raise StopIteration\n\n def __len__(self):\n return len(self._var_list)" }, { "identifier": "_QiCalcBase", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class _QiCalcBase(QiExpression):\n \"\"\"Represents binary and unary operations.\"\"\"\n\n def __init__(self, val1, op, val2) -> None:\n super().__init__()\n\n self.val1 = val1\n self.op: QiOp = op\n self.val2 = val2\n\n from .qi_types import add_qi_calc_constraints\n\n add_qi_calc_constraints(op, val1, val2, self)\n\n @property\n def contained_variables(self):\n \"\"\"Function traverses the operation tree to determine which QiVariables are used for the calculations.\n Found QiVariables are added to _contained_variables\"\"\"\n if len(self._contained_variables) == 0:\n self._variables_to_container()\n\n return self._contained_variables\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_calc(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return (\n isinstance(other, _QiCalcBase)\n and self.op == other.op\n and self.val1._equal_syntax(other.val1)\n and self.val2._equal_syntax(other.val2)\n )\n\n def __str__(self):\n return (\n \"(\"\n + self.val1.__str__()\n + \" \"\n + self.op.value\n + \" \"\n + self.val2.__str__()\n + \")\"\n )" }, { "identifier": "_QiVariableBase", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class _QiVariableBase(QiExpression):\n \"\"\"Base class for QiVariables.\n Variables can be relevant to only a subset of QiCells, this subset is saved in _relevant_cells.\n Variables are simple expressions and, therefore, are typed.\n Variables can be compared by self.id.\"\"\"\n\n id_iter = itertools.count()\n str_id_iter = itertools.count()\n\n def __init__(\n self,\n type: QiType,\n value: Optional[Union[int, float]] = None,\n name=None,\n ):\n from .qi_jobs import QiCell\n\n assert isinstance(type, QiType)\n assert value is None or isinstance(value, (int, float))\n\n super().__init__()\n\n if type != QiType.UNKNOWN:\n self._type_info.set_type(type, _TypeDefiningUse.VARIABLE_DEFINITION)\n\n self.value = value\n\n self._value = value\n self._relevant_cells: Set[QiCell] = set()\n self.id = next(_QiVariableBase.id_iter)\n self.str_id = next(_QiVariableBase.str_id_iter)\n\n 
self._contained_variables.add(self)\n\n self.name = name\n\n @property\n def contained_variables(self):\n return self._contained_variables\n\n @staticmethod\n def reset_str_id():\n _QiVariableBase.str_id_iter = itertools.count()\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_variable(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return isinstance(other, _QiVariableBase) and self.id == other.id\n\n def __hash__(self) -> int:\n return self.id\n\n def __str__(self) -> str:\n return f\"QiVariable({self.name or ''})\"" }, { "identifier": "QiExpression", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiExpression:\n \"\"\"Superclass of every possible qicode expression.\"\"\"\n\n def __init__(self):\n self._contained_variables = QiVariableSet()\n self._type_info = _TypeInformation(self)\n\n @property\n def type(self):\n return self._type_info.type\n\n @staticmethod\n def _from(x):\n \"\"\"Creates an instance of QiExpression of the provided argument if possible.\"\"\"\n if isinstance(x, (float, int)):\n return _QiConstValue(x)\n elif isinstance(x, QiExpression):\n return x\n else:\n raise RuntimeError(f\"Can not create QiExpression from type {type(x)}.\")\n\n @abstractmethod\n def accept(self, visitor: QiExpressionVisitor):\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `accept`. This is a bug.\"\n )\n\n @property\n def contained_variables(self):\n \"\"\"Returns the variables used in this expression.\n QiExpression subclasses which contain variables (_QiCalcBase and _QiVariableBase) need to overwrite this.\n \"\"\"\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `contained_variables`. This is a bug.\"\n )\n\n def _variables_to_container(self):\n if isinstance(self, _QiVariableBase):\n self._contained_variables.add(self)\n elif isinstance(self, _QiCalcBase):\n self._contained_variables.update(self.val1.contained_variables)\n self._contained_variables.update(self.val2.contained_variables)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `_equal_syntax`. 
This is a bug.\"\n )\n\n # QiCellProperties are supposed to support some form of constant folding.\n # However, originally, instead of implementing this in an extra pass over\n # QiJob they were added to the QiCellProperty class.\n # In order to keep support for this limited form of constant folding\n # This logic was placed here.\n\n # (I'm not sure why we don't fold when both operands are QiCellProperty.\n # And I think the reason we don't fold tow _QiConstValue is that originally\n # They were just int/float and would \"fold\" implicitely when using any\n # math operator on them)\n\n # If anyone ever feels the need to improve this situation, I would\n # encourage them to implement a constant folding pass using the existing\n # dataflow infrastructure.\n # This pdf seems to give a nice short introduction into the topic:\n # http://openclassroom.stanford.edu/MainFolder/courses/Compilers/docs/slides/15-02-constant-propagation-annotated.pdf\n\n def __add__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_add_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_radd_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.PLUS, x)\n\n def __radd__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_radd_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_add_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.PLUS, self)\n\n def __sub__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_sub_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_rsub_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.MINUS, x)\n\n def __rsub__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_rsub_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_sub_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.MINUS, self)\n\n def __mul__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_mul_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_rmul_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.MULT, x)\n\n def __rmul__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_rmul_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_mul_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.MULT, self)\n\n def __lshift__(self, x):\n return _QiCalcBase(self, QiOp.LSH, QiExpression._from(x))\n\n def __rshift__(self, x):\n return _QiCalcBase(self, QiOp.RSH, QiExpression._from(x))\n\n def __and__(self, x):\n return _QiCalcBase(self, QiOp.AND, QiExpression._from(x))\n\n def __rand__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.AND, self)\n\n def __or__(self, x):\n return _QiCalcBase(self, QiOp.OR, QiExpression._from(x))\n\n def __ror__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.OR, self)\n\n def __xor__(self, x):\n return _QiCalcBase(self, QiOp.XOR, 
QiExpression._from(x))\n\n def __rxor__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.XOR, self)\n\n def __invert__(self):\n return _QiCalcBase(self, QiOp.NOT, None)\n\n def __lt__(self, x):\n return QiCondition(self, QiOpCond.LT, QiExpression._from(x))\n\n def __le__(self, x):\n return QiCondition(self, QiOpCond.LE, QiExpression._from(x))\n\n def __gt__(self, x):\n return QiCondition(self, QiOpCond.GT, QiExpression._from(x))\n\n def __ge__(self, x):\n return QiCondition(self, QiOpCond.GE, QiExpression._from(x))\n\n def __eq__(self, x):\n return QiCondition(self, QiOpCond.EQ, QiExpression._from(x))\n\n def __ne__(self, x):\n return QiCondition(self, QiOpCond.NE, QiExpression._from(x))" }, { "identifier": "_QiConstValue", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class _QiConstValue(QiExpression):\n \"\"\"Represents QiExpression which are a constant (compiletime known) values.\n Integers can be used as either NORMAL, TIME or FREQUENCY values. It is up to the type inference to figure it out.\n If the value can be represented as a float value it has an additional attribute float_value which represents the value before\n it has been converted to the integer representation used by the sequencer.\n \"\"\"\n\n def __init__(self, value: Union[int, float]):\n super().__init__()\n\n self._given_value = value # Value given to the constructor. Is interpreted differently depending on the type.\n\n # Constant STATE values can only be 0 or 1, therefore we forbid QiType.STATE if we have a different value.\n if isinstance(self._given_value, float) or self._given_value not in [1, 0]:\n self._type_info.add_illegal_type(\n QiType.STATE, _IllegalTypeReason.INVALID_STATE_CONSTANT\n )\n\n if isinstance(self._given_value, float):\n self._type_info.add_illegal_type(\n QiType.NORMAL, _IllegalTypeReason.INVALID_NORMAL_CONSTANT\n )\n\n @property\n def float_value(self):\n assert self.type in (QiType.TIME or self.type, QiType.FREQUENCY)\n return self._given_value\n\n @property\n def value(self):\n \"\"\"\n Integer representation of the constant value.\n Since the sequencer doesn't have a floating point unit, any calculations has to be using integers.\n In practice, this means we only perform fixpoint arithmetic and need to convert any float like value\n to such an fixpoint value.\n The correct conversion depends on the type.\n \"\"\"\n if self.type in (QiType.NORMAL, QiType.STATE, QiType.UNKNOWN):\n return self._given_value\n elif self.type == QiType.TIME:\n return int(util.conv_time_to_cycles(self._given_value, \"ceil\"))\n else:\n assert self.type == QiType.FREQUENCY\n return util.conv_freq_to_nco_phase_inc(self._given_value)\n\n @property\n def contained_variables(self):\n return QiVariableSet()\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_constant(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n assert QiType.UNKNOWN not in (self.type, other.type)\n return isinstance(other, _QiConstValue) and self.value == other.value\n\n def __str__(self):\n if self.type in (QiType.TIME, QiType.FREQUENCY):\n value = self.float_value\n elif self.type in (QiType.NORMAL, QiType.STATE, QiType.UNKNOWN):\n value = self.value\n else:\n raise RuntimeError(\n \"This program point should be unreacheable. 
Please file a bug report.\"\n )\n return f\"{value:g}\"" }, { "identifier": "QiCondition", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiCondition:\n \"\"\"Saves conditional comparisons.\n Can only be root node\"\"\"\n\n def __init__(\n self,\n val1: QiExpression,\n op: QiOpCond = QiOpCond.GT,\n val2: QiExpression = _QiConstValue(0),\n ) -> None:\n self._contained_variables = QiVariableSet()\n\n self.val1 = val1\n self.op = op\n self.val2 = val2\n\n from .qi_types import add_qi_condition_constraints\n\n add_qi_condition_constraints(op, val1, val2)\n\n @property\n def contained_variables(self):\n if len(self._contained_variables) == 0:\n self._contained_variables.update(self.val1.contained_variables)\n self._contained_variables.update(self.val2.contained_variables)\n\n return self._contained_variables\n\n def accept(self, visitor):\n visitor.visit_condition(self)\n\n def __str__(self) -> str:\n return f\"{self.val1} {self.op.value} {self.val2}\"" }, { "identifier": "QiOpCond", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiOpCond(Enum):\n LT = \"<\"\n LE = \"<=\"\n GT = \">\"\n GE = \">=\"\n EQ = \"==\"\n NE = \"!=\"\n\n @staticmethod\n def invert(condition):\n inverted = {\n QiOpCond.EQ: QiOpCond.NE,\n QiOpCond.NE: QiOpCond.EQ,\n QiOpCond.LT: QiOpCond.GE,\n QiOpCond.LE: QiOpCond.GT,\n QiOpCond.GT: QiOpCond.LE,\n QiOpCond.GE: QiOpCond.LT,\n }\n inv = inverted.get(condition)\n if inv is None:\n raise RuntimeError(\"Condition not found: \" + str(condition))\n return inv" }, { "identifier": "QiOp", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiOp(Enum):\n PLUS = \"+\"\n MINUS = \"-\"\n MULT = \"*\"\n LSH = \"<<\"\n RSH = \">>\"\n AND = \"&\"\n OR = \"|\"\n XOR = \"^\"\n NOT = \"~\"" }, { "identifier": "SeqLoad", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SeqLoad(SeqITypeInst):\n def __init__(\n self,\n dst: int,\n base: int,\n offset: int = 0,\n ):\n \"\"\"Load Sequencer instruction.\n\n :param dst: The register address which will contain the loaded value.\n :param base: The register address which contains the source address.\n :param offset: Constant offset added to the source address. Defaults to 0.\n :param width: Number of bits to be loaded. Defaults to 32.\n :param signed: Is the loaded value signed. Depending on this flag the loaded value is sign extended.\n \"\"\"\n\n assert SequencerInstruction.is_value_in_lower_immediate(\n offset\n ), \"Invalid offset ({offset}) to load instruction.\"\n\n # The hardware currently only supports 32 bit memory accesses.\n super().__init__(\n SeqOpCode.LOAD,\n SeqMemFunct3.get_from_width(32, False),\n dst,\n base,\n offset,\n )\n\n @property\n def base_reg(self):\n return self.register" }, { "identifier": "SeqStore", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SeqStore(SeqSTypeInst):\n \"\"\"Store Sequencer instruction.\n\n :param src: The register address which contains the value to be stored.\n :param base: The register address which contains the destination address.\n :param offset: Constant offset added to the destination address. Defaults to 0.\n :param width: Number of bits to be stored. 
Defaults to 32.\n \"\"\"\n\n def __init__(\n self,\n src: int,\n base: int,\n offset: int = 0,\n ):\n assert SequencerInstruction.is_value_in_lower_immediate(\n offset\n ), \"Invalid offset ({offset}) to store instruction.\"\n\n # The hardware currently only supports 32 bit memory accesses.\n super().__init__(\n SeqOpCode.STORE, SeqMemFunct3.get_from_width(32, False), base, src, offset\n )\n\n @property\n def base_reg(self):\n return self.reg1\n\n @property\n def src_reg(self):\n return self.reg2" }, { "identifier": "SeqAwaitQubitState", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SeqAwaitQubitState(SeqITypeInst):\n def __init__(\n self,\n cell: int = 0,\n dst: int = 0,\n ) -> None:\n super().__init__(\n SeqOpCode.SYNCH, SeqExtSynchFunct3.QUBIT_STATE, dst, 0, cell, 0\n )" }, { "identifier": "SequencerInstruction", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SequencerInstruction:\n OPCODE_WIDTH = 7\n FUNCT3_WIDTH = 3\n FUNCT7_WIDTH = 7\n REGISTER_WIDTH = 5\n LOWER_IMMEDIATE_WIDTH = 12\n UPPER_IMMEDIATE_WIDTH = 20\n\n LOWER_IMM_MAX = (\n 2 ** (LOWER_IMMEDIATE_WIDTH - 1)\n ) - 1 # Lower immediate 12 Bits - 1Bit Signed\n LOWER_IMM_MIN = -(2 ** (LOWER_IMMEDIATE_WIDTH - 1))\n\n UPPER_IMM_MAX = (\n 2 ** (UPPER_IMMEDIATE_WIDTH - 1)\n ) - 1 # Upper immediate 20 Bits - 1Bit Signed\n UPPER_IMM_MIN = -(2 ** (UPPER_IMMEDIATE_WIDTH - 1))\n UPPER_IMM_MAX_UNSIGNED = 2**UPPER_IMMEDIATE_WIDTH\n\n imm_type = Union[int] # might include float in the future\n\n def __init__(self, OpCode: SeqOpCode) -> None:\n self.op = OpCode\n\n @staticmethod\n def is_value_in_lower_immediate(val: imm_type) -> bool:\n return (\n SequencerInstruction.LOWER_IMM_MIN\n <= val\n <= SequencerInstruction.LOWER_IMM_MAX\n )\n\n @staticmethod\n def is_value_in_unsigned_upper_immediate(val: imm_type) -> bool:\n return SequencerInstruction.UPPER_IMM_MAX_UNSIGNED >= abs(val)\n\n @abstractmethod\n def get_riscv_instruction(self) -> int:\n pass" }, { "identifier": "SeqRegImmediateInst", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SeqRegImmediateInst(SeqITypeInst):\n def __init__(\n self,\n operator: QiOp,\n dst_reg: int = 0,\n register: int = 0,\n immediate: SequencerInstruction.imm_type = 0,\n ) -> None:\n funct3 = super().QiOpToFunct3(operator)\n funct7 = super().QiOpToFunct7(operator)\n super().__init__(\n SeqOpCode.REG_IMM, funct3, dst_reg, register, immediate, funct7\n )" }, { "identifier": "SeqRegRegInst", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SeqRegRegInst(SeqRTypeInst):\n def __init__(\n self, operator: QiOp, dst_reg: int = 0, reg_1: int = 0, reg_2: int = 0\n ) -> None:\n funct3 = super().QiOpToFunct3(operator)\n funct7 = super().QiOpToFunct7(operator)\n super().__init__(\n SeqOpCode.REGISTER_REGISTER, funct3, funct7, dst_reg, reg_1, reg_2\n )" }, { "identifier": "SeqLoadUpperImm", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SeqLoadUpperImm(SeqUTypeInst):\n def __init__(\n self, dst_reg: int = 0, immediate: SequencerInstruction.imm_type = 0\n ) -> None:\n super().__init__(SeqOpCode.LOAD_UPPER_IMM, dst_reg, immediate)" }, { "identifier": "SeqJump", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SeqJump(SequencerInstruction):\n \"\"\"Does not represent actual J-Type instruction, RISC-V only supports address sizes as multiples of 2\"\"\"\n\n def __init__(self, rel_jump: int = 0) -> None:\n super().__init__(SeqOpCode.JUMP)\n self.jump_val = rel_jump\n\n def get_riscv_instruction(self) -> 
int:\n instruction = 0\n instruction |= self.op.value\n instruction |= (\n (self.jump_val & 0x7F800) >> 11\n ) << SequencerInstruction.OPCODE_WIDTH + SequencerInstruction.REGISTER_WIDTH\n instruction |= (\n ((self.jump_val & 0x400) >> 10)\n << SequencerInstruction.OPCODE_WIDTH\n + SequencerInstruction.REGISTER_WIDTH\n + 8\n )\n instruction |= (\n (self.jump_val & 0x3FF)\n << SequencerInstruction.OPCODE_WIDTH\n + SequencerInstruction.REGISTER_WIDTH\n + 9\n )\n instruction |= (\n ((self.jump_val & 0x80000) >> 19)\n << SequencerInstruction.OPCODE_WIDTH\n + SequencerInstruction.REGISTER_WIDTH\n + 19\n )\n\n return instruction\n\n def __str__(self) -> str:\n return f\"Op: {self.op.name}, immediate: {hex(self.jump_val)}\\n\"" }, { "identifier": "SeqBranch", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SeqBranch(SeqBTypeInst):\n def __init__(self, operator, reg1: int, reg2: int, rel_jump: int = 0) -> None:\n op_reg1_reg2 = super().get_register_operation_tuple(operator, reg1, reg2)\n super().__init__(\n SeqOpCode.BRANCH,\n op_reg1_reg2[0],\n op_reg1_reg2[1],\n op_reg1_reg2[2],\n rel_jump,\n )\n\n def set_jump_value(self, jump_val: int):\n self.immediate = jump_val" }, { "identifier": "SeqWaitImm", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SeqWaitImm(SeqUTypeInst):\n def __init__(self, duration: int = 0) -> None:\n super().__init__(\n OpCode=SeqOpCode.WAIT_IMM, immediate=((duration & 0xFFFFF) << 12)\n )\n\n @property\n def immediate(self):\n return self._immediate >> 12\n\n def __str__(self):\n return f\"Op: {self.op.name}, dst: {str(self.dst_reg)}, immediate: {hex(self.immediate & 0x000FFFFF)}\\n\"" }, { "identifier": "SeqWaitRegister", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SeqWaitRegister(SeqUTypeInst):\n def __init__(self, reg: int) -> None:\n super().__init__(OpCode=SeqOpCode.WAIT_REG, dst_reg=reg)" }, { "identifier": "SeqTrigger", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SeqTrigger(SeqUTypeInst):\n def __init__(\n self,\n module0: int = 0,\n module1: int = 0,\n module2: int = 0,\n module3: int = 0,\n module4: int = 0,\n sync=False,\n reset=False,\n ) -> None:\n self._trig_indices = [module0, module1, module2, module3, module4]\n\n immediate = 0\n immediate |= (reset & 0x1) << 12\n immediate |= (sync & 0x1) << 14\n immediate |= (module0 & 0xF) << 16\n immediate |= (module1 & 0xF) << 20\n immediate |= (module2 & 0xF) << 22\n immediate |= (module3 & 0xF) << 26\n immediate |= (module4 & 0xF) << 30\n super().__init__(OpCode=SeqOpCode.TRIGGER, immediate=immediate)\n\n def __str__(self) -> str:\n return (\n f\"Op: {self.op.name}, mod0: {hex(self._trig_indices[0])}, mod1: {hex(self._trig_indices[1])}\"\n f\", mod2: {hex(self._trig_indices[2])}, mod3: {hex(self._trig_indices[3])}, mod4: {hex(self._trig_indices[4])}\\n\"\n )" }, { "identifier": "SeqEnd", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SeqEnd(SeqSTypeInst):\n def __init__(self) -> None:\n super().__init__(SeqOpCode.SYNCH, SeqExtSynchFunct3.START, 0, 0, 0)" }, { "identifier": "SeqTriggerWaitRegister", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SeqTriggerWaitRegister(SeqUTypeInst):\n def __init__(self, reg: int) -> None:\n super().__init__(OpCode=SeqOpCode.TRIG_WAIT_REG, dst_reg=reg)" }, { "identifier": "_get_for_range_iterations", "path": "src/qiclib/code/qi_util.py", "snippet": "def _get_for_range_iterations(start, end, step):\n \"\"\"Returns number of iterations of ForRange or None if 
start or end are QiVariables.\n Stupid but no need to check validity of input, in case of unrolled loop\"\"\"\n from .qi_var_definitions import _QiVariableBase, _QiConstValue, QiCellProperty\n\n if (\n isinstance(start, _QiVariableBase)\n or start is None\n or isinstance(end, _QiVariableBase)\n or end is None\n ):\n return None\n\n if isinstance(start, (_QiConstValue, QiCellProperty)):\n start = start.value\n if isinstance(end, (_QiConstValue, QiCellProperty)):\n end = end.value\n if isinstance(step, (_QiConstValue, QiCellProperty)):\n step = step.value\n\n iterations = 0\n for _ in range(start, end, step):\n iterations += 1\n return iterations" } ]
from enum import Enum from typing import List, Union, Any, Dict, Optional, Tuple from qiclib.code.qi_jobs import ( ForRange, If, Parallel, cQiRecording, cQiSync, ) from .qi_var_definitions import ( QiCellProperty, QiVariableSet, _QiCalcBase, _QiVariableBase, QiExpression, _QiConstValue, QiCondition, QiOpCond, QiOp, ) from .qi_seq_instructions import ( SeqLoad, SeqStore, SeqAwaitQubitState, SequencerInstruction, SeqRegImmediateInst, SeqRegRegInst, SeqLoadUpperImm, SeqJump, SeqBranch, SeqWaitImm, SeqWaitRegister, SeqTrigger, SeqEnd, SeqTriggerWaitRegister, ) from .qi_util import _get_for_range_iterations from .qi_var_definitions import _QiVariableBase from .qi_var_definitions import _QiCalcBase from .qi_var_definitions import _QiVariableBase from .qi_jobs import _cQiPlay_base import warnings import qiclib.packages.utility as util
10,864
"""Returns register to stack; Raises exception when register is already in stack, or addressing is faulty. Releasing register 0 does nothing""" if reg in self._register_stack: raise IndexError("Release Register: Already released register") if (reg.adr > Sequencer.AVAILABLE_REGISTERS) or (reg.adr < 0): raise IndexError("Release Register: Address out of Range") if reg == self.reg0: return reg.valid = True # if register was invalidated and is released again, return it to initial valid state self._register_stack.append(reg) def add_instruction_to_list( self, instruction: SequencerInstruction, length_in_cycles: int = 1, length_valid=True, ): """Adds instruction to list. If pulses are still running, adds choke instruction before adding the current command to the list""" if self._trigger_mods.is_pulse_active: self.trigger_choke_pulse() if length_in_cycles == 0: length_in_cycles = 1 # length is always at least 1 per instruction self.instruction_list.append(instruction) self._prog_cycles.add( length_in_cycles, length_valid ) # Will be deprecated when external sync is possible. def get_prog_size(self) -> int: return len(self.instruction_list) def add_mov_command(self, dst_reg: _Register, src_reg: _Register): """Copies value of src_reg to dst_reg.""" self.add_calculation(src_reg, QiOp.PLUS, 0, dst_reg) def get_upper_immediate_value(self, value: SequencerInstruction.imm_type): """If bit 11 of lower value is 1, ADDI command sign extends the value. To account for that, sign extend lower 12 bits and subtract from upper 20 bits.""" sign_extended_lower = ( value | 0xFFFFF000 if value & 0x00000800 != 0 else value & 0x00000FFF ) return (value - sign_extended_lower) & 0xFFFFF000 def immediate_to_register( self, val: SequencerInstruction.imm_type, dst_reg: Optional[_Register] = None ) -> _Register: """Loads immediate to dst_reg. If dst_reg is not defined a new register is used to save val to. If value == 0 and no register is specified, reg0 is returned, which always contains 0. dst_reg.value is updated to reflect changes.""" if val == 0 and dst_reg is None: return self.reg0 elif dst_reg is None: dst_reg = self.request_register() if isinstance(val, float): raise NotImplementedError("float not implemented yet") if SequencerInstruction.is_value_in_lower_immediate(val): self.add_instruction_to_list( SeqRegImmediateInst(QiOp.PLUS, dst_reg.adr, 0, val) ) # register_0 always contains 0 else: upper_immediate = self.get_upper_immediate_value(val) self.add_instruction_to_list(SeqLoadUpperImm(dst_reg.adr, upper_immediate)) self.add_instruction_to_list( SeqRegImmediateInst(QiOp.PLUS, dst_reg.adr, dst_reg.adr, val) ) dst_reg.update_register_value(val, QiOp.PLUS, 0) return dst_reg def add_calculation( self, val1: Union[_Register, int, float], operator: QiOp, val2: Union[_Register, int, float], dst_reg: Optional[_Register] = None, ) -> _Register: """Adds calculation command to sequencer. Depending on the values and the operation different commands are added. 
dst_reg.value is updated to reflect changes.""" if (not isinstance(val1, _Register)) and (not isinstance(val2, _Register)): raise RuntimeError("QiCalc should not contain two int/float") if dst_reg is None: dst_reg = self.request_register() self.alu.calculate(dst_reg, val1, operator, val2) dst_reg.update_register_value(val1, operator, val2) return dst_reg def add_condition( self, reg1: _Register, operator: QiOpCond, reg2: _Register, jmp_val=0 ): """Adds condition command to the sequence and returns its reference, to define the jump value at a later point""" cmd = SeqBranch(operator, reg1.adr, reg2.adr, jmp_val) self.add_instruction_to_list(cmd) return cmd def add_jump(self, jmp_val=0) -> SeqJump: """Adds jump command to the sequence and returns its reference, to define the jump value at a later point""" cmd = SeqJump(jmp_val) self.add_instruction_to_list( cmd, length_in_cycles=Sequencer.JUMP_EXECUTION_CYCLES ) return cmd def __evaluate_qicalc_val(self, value: QiExpression) -> Union[_Register, int]: """Return value of QiCalc-Value. If another QiCalc node is found, evaluate node first, then return target register of evaluated node. Return _Register if QiVariable is found. Else return constant register value as int. (Can represent cycles)""" if isinstance(value, _QiCalcBase): return self.add_qi_calc(value) elif isinstance(value, _QiVariableBase): return self.get_var_register(value)
# Copyright © 2017-2023 Quantum Interface ([email protected]) # Richard Gebauer, IPE, Karlsruhe Institute of Technology # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ The lower level logic of the code generation. This module tracks the sequencer state at the current point (e.g. register values, variable to register mapping, etc.), provides helper functions to generate code for expressions and more. """ class _Register: """Class of Sequencer representing registers. Keeps track of values in register. Values are used for program length. Program length is invalidated by use of If/Else. TODO load commands invalidate value""" def __init__(self, address) -> None: self.adr = address self.value = None self.valid = True def addition(self, val1, val2): self.value = val1 + val2 def subtraction(self, val1, val2): self.value = val1 - val2 def multiplication(self, val1, val2): self.value = val1 * val2 def and_values(self, val1, val2): self.value = val1 & val2 def or_values(self, val1, val2): self.value = val1 | val2 def xor_values(self, val1, val2): self.value = val1 ^ val2 def lshift(self, val1, val2): self.value = val1 << val2 def rshift(self, val1, val2): self.value = val1 >> val2 def inversion(self, val1, val2): self.value = ~val1 # Dictionary used to receive function from input QiOp eval_operation = { QiOp.PLUS: addition, QiOp.MINUS: subtraction, QiOp.MULT: multiplication, QiOp.AND: and_values, QiOp.OR: or_values, QiOp.XOR: xor_values, QiOp.LSH: lshift, QiOp.RSH: rshift, QiOp.NOT: inversion, } def get_value(self): if self.valid: return self.value return None def update_register_value(self, val1, op, val2): """Register Values are updated to allow implicit synchronisations through wait when variable Wait/Pulse is used. When a calculation is done using a invalid variable value, the ensuing value is also invalidated. 
""" if self.adr == 0: self.value = 0 # reg0 always contains 0 return if isinstance(val1, _Register): if val1.value is None: raise RuntimeError( f"Variable at Register {val1.adr} has not been properly initialised" ) if not val1.valid: self.valid = False val1 = val1.value if isinstance(val2, _Register): if val2.value is None: raise RuntimeError( f"Variable at Register {val2.adr} has not been properly initialised" ) if not val2.valid: self.valid = False val2 = val2.value self.eval_operation[op](self, val1, val2) class ForRangeEntry: def __init__(self, reg_addr, start_val, end_val, step_val) -> None: self.reg_addr = reg_addr self.start = start_val self.end = end_val self.step = step_val self.end_addr = 0 self.iterations = 0 self.aggregate_iterations = 0 self.contained_entries: List[ForRangeEntry] = [] def _calc_aggregate(self): """Calculates the number of loops contained inside, considering nested entries, for later use at progress bar.""" self.iterations = _get_for_range_iterations(self.start, self.end, self.step) if len(self.contained_entries) == 0 or self.iterations is None: if self.iterations is None: self.aggregate_iterations = 0 warnings.warn( "A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate." ) else: self.aggregate_iterations = self.iterations else: nested = 0 for entry in self.contained_entries: if entry.aggregate_iterations == 0: warnings.warn( "A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate." ) continue nested += entry.aggregate_iterations self.aggregate_iterations = self.iterations * (nested if nested != 0 else 1) def get_iteration(self, value: int) -> int: """Returns the current iteration depending on the parameter value""" if isinstance(self.start, _QiVariableBase): return 0 _step = self.step if isinstance(self.step, int) else self.step.value iterations = 0 for _ in range(self.start, value, _step): iterations += 1 return iterations @staticmethod def get_total_loops(entry_list): if len(entry_list) == 0: return 1 iterations = 0 for entry in entry_list: iterations += entry.aggregate_iterations return iterations if iterations > 0 else 1 @staticmethod def calculate_current_loop(entry_list, register_list, prog_counter): loop = 0 for entry in entry_list: if entry.end_addr < prog_counter: loop += entry.aggregate_iterations else: iteration = entry.get_iteration(register_list[entry.reg_addr]) if len(entry.contained_entries) == 0: loop += iteration else: loop += iteration * ForRangeEntry.get_total_loops( entry.contained_entries ) + ForRangeEntry.calculate_current_loop( entry.contained_entries, register_list, prog_counter ) return loop return loop class Sequencer: AVAILABLE_REGISTERS = 31 MULTIPLICATION_LENGTH = 6 JUMP_EXECUTION_CYCLES = 2 LOAD_STORE_LENGTH = 8 # Additional delay to prevent ignored trigger for consecutive readouts RECORDING_MODULE_DELAY_CYCLES = 1 CHOKE_PULSE_INDEX = 14 def __init__(self, cell_index=None): self.alu = _ALU(self) self.reset() self.cell_index = cell_index def reset(self): self._register_stack: List[_Register] = [] self.instruction_list: List[SequencerInstruction] = [] self._prog_cycles = _ProgramCycles() self._var_reg_dict: Dict[Any, _Register] = {} self._trigger_mods = _TriggerModules() self._for_range_list = [] self._for_range_stack: List[ForRangeEntry] = [] # register 0 always contains 0, so is not in stack self.reg0 = _Register(0) for x in range(Sequencer.AVAILABLE_REGISTERS, 0, -1): self._register_stack.append(_Register(x)) def 
print_assembler(self): pc = 0 for instruction in self.instruction_list: print(str(pc) + "# ", end="") print(instruction) pc += 1 @property def prog_cycles(self): """Program length is used for implicit synchs with Wait-Commands. If a program contains variable If/Else or loads to wait registers prog_length can not be determined. Invalid prog_cycles are some value less than 0. """ if self._prog_cycles.valid: return self._prog_cycles.cycles return _ProgramCycles.INVALID @prog_cycles.setter def prog_cycles(self, x): """Set externally when ForRange is used.""" self._prog_cycles.cycles = x @property def recording_delay(self): return util.conv_cycles_to_time(self.RECORDING_MODULE_DELAY_CYCLES) @property def readout_active(self): return self._trigger_mods.is_readout_active @property def manipulation_active(self): return self._trigger_mods.is_manipulation_active def add_variable(self, var): """Adds variable to sequencer, reserving a register for it""" reg = self.request_register() self._var_reg_dict[var.id] = reg # Named variables can be initialized externally if var.name is not None: reg.valid = False reg.value = 0 def release_variable(self, var): self.release_register(self.get_var_register(var)) def get_var_register(self, var) -> _Register: """Returns _Register of QiVariable var""" reg = self._var_reg_dict.get(var.id) if reg is None: raise RuntimeError( f"Variable not defined for Sequencer, var.id:{var.id}, {self._var_reg_dict}" ) return reg def get_var_value(self, var) -> Union[int, float, None]: return self.get_var_register(var).get_value() def request_register(self) -> _Register: """Returns register from stack, raises exception, if no registers are on stack anymore""" try: return self._register_stack.pop() except IndexError as e: print( "Not enough registers available, sequencer " + str(self) + " error: " + str(e) ) raise def get_cycles_from_length(self, length) -> Union[_Register, int]: """If length is QiVariable, return _Register, else return numbers of cycles ceiled""" if isinstance(length, _QiVariableBase): return self.get_var_register(length) elif isinstance(length, int): length = float(length) return util.conv_time_to_cycles(length, "ceil") def release_register(self, reg: _Register): """Returns register to stack; Raises exception when register is already in stack, or addressing is faulty. Releasing register 0 does nothing""" if reg in self._register_stack: raise IndexError("Release Register: Already released register") if (reg.adr > Sequencer.AVAILABLE_REGISTERS) or (reg.adr < 0): raise IndexError("Release Register: Address out of Range") if reg == self.reg0: return reg.valid = True # if register was invalidated and is released again, return it to initial valid state self._register_stack.append(reg) def add_instruction_to_list( self, instruction: SequencerInstruction, length_in_cycles: int = 1, length_valid=True, ): """Adds instruction to list. If pulses are still running, adds choke instruction before adding the current command to the list""" if self._trigger_mods.is_pulse_active: self.trigger_choke_pulse() if length_in_cycles == 0: length_in_cycles = 1 # length is always at least 1 per instruction self.instruction_list.append(instruction) self._prog_cycles.add( length_in_cycles, length_valid ) # Will be deprecated when external sync is possible. 
def get_prog_size(self) -> int: return len(self.instruction_list) def add_mov_command(self, dst_reg: _Register, src_reg: _Register): """Copies value of src_reg to dst_reg.""" self.add_calculation(src_reg, QiOp.PLUS, 0, dst_reg) def get_upper_immediate_value(self, value: SequencerInstruction.imm_type): """If bit 11 of lower value is 1, ADDI command sign extends the value. To account for that, sign extend lower 12 bits and subtract from upper 20 bits.""" sign_extended_lower = ( value | 0xFFFFF000 if value & 0x00000800 != 0 else value & 0x00000FFF ) return (value - sign_extended_lower) & 0xFFFFF000 def immediate_to_register( self, val: SequencerInstruction.imm_type, dst_reg: Optional[_Register] = None ) -> _Register: """Loads immediate to dst_reg. If dst_reg is not defined a new register is used to save val to. If value == 0 and no register is specified, reg0 is returned, which always contains 0. dst_reg.value is updated to reflect changes.""" if val == 0 and dst_reg is None: return self.reg0 elif dst_reg is None: dst_reg = self.request_register() if isinstance(val, float): raise NotImplementedError("float not implemented yet") if SequencerInstruction.is_value_in_lower_immediate(val): self.add_instruction_to_list( SeqRegImmediateInst(QiOp.PLUS, dst_reg.adr, 0, val) ) # register_0 always contains 0 else: upper_immediate = self.get_upper_immediate_value(val) self.add_instruction_to_list(SeqLoadUpperImm(dst_reg.adr, upper_immediate)) self.add_instruction_to_list( SeqRegImmediateInst(QiOp.PLUS, dst_reg.adr, dst_reg.adr, val) ) dst_reg.update_register_value(val, QiOp.PLUS, 0) return dst_reg def add_calculation( self, val1: Union[_Register, int, float], operator: QiOp, val2: Union[_Register, int, float], dst_reg: Optional[_Register] = None, ) -> _Register: """Adds calculation command to sequencer. Depending on the values and the operation different commands are added. dst_reg.value is updated to reflect changes.""" if (not isinstance(val1, _Register)) and (not isinstance(val2, _Register)): raise RuntimeError("QiCalc should not contain two int/float") if dst_reg is None: dst_reg = self.request_register() self.alu.calculate(dst_reg, val1, operator, val2) dst_reg.update_register_value(val1, operator, val2) return dst_reg def add_condition( self, reg1: _Register, operator: QiOpCond, reg2: _Register, jmp_val=0 ): """Adds condition command to the sequence and returns its reference, to define the jump value at a later point""" cmd = SeqBranch(operator, reg1.adr, reg2.adr, jmp_val) self.add_instruction_to_list(cmd) return cmd def add_jump(self, jmp_val=0) -> SeqJump: """Adds jump command to the sequence and returns its reference, to define the jump value at a later point""" cmd = SeqJump(jmp_val) self.add_instruction_to_list( cmd, length_in_cycles=Sequencer.JUMP_EXECUTION_CYCLES ) return cmd def __evaluate_qicalc_val(self, value: QiExpression) -> Union[_Register, int]: """Return value of QiCalc-Value. If another QiCalc node is found, evaluate node first, then return target register of evaluated node. Return _Register if QiVariable is found. Else return constant register value as int. (Can represent cycles)""" if isinstance(value, _QiCalcBase): return self.add_qi_calc(value) elif isinstance(value, _QiVariableBase): return self.get_var_register(value)
elif isinstance(value, _QiConstValue):
5
2023-11-10 10:26:10+00:00
16k
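
The qiclib record above centers on the Sequencer's immediate handling: its get_upper_immediate_value docstring explains that, because ADDI sign-extends its 12-bit immediate, the upper 20 bits passed to LUI must be adjusted whenever bit 11 of the lower half is set, and immediate_to_register then emits the LUI/ADDI pair. A minimal standalone sketch of that split, for illustration only — the helper name split_immediate and the example value are assumptions, not part of the dataset record:

def split_immediate(value: int) -> tuple[int, int]:
    # Lower 12 bits as ADDI sees them; ADDI sign-extends when bit 11 is set.
    lower = value & 0xFFF
    sign_extended_lower = (lower | 0xFFFFF000) if (lower & 0x800) else lower
    # Compensate in the upper 20 bits (the LUI operand) for that sign extension.
    upper = (value - sign_extended_lower) & 0xFFFFF000
    return upper, lower

# Example: bit 11 of the low half is set, so the upper part gets bumped by 0x1000.
value = 0x12345800
upper, lower = split_immediate(value)                    # upper == 0x12346000, lower == 0x800
addi_view = lower - 0x1000 if lower & 0x800 else lower   # the sign-extended ADDI immediate
assert (upper + addi_view) & 0xFFFFFFFF == value         # LUI + ADDI reconstructs the value
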
jpcadena/fastapi-boilerplate
app/api/api_v1/router/auth.py
[ { "identifier": "get_redis_dep", "path": "app/api/deps.py", "snippet": "async def get_redis_dep(\n redis_dependency: Annotated[RedisDependency, Depends()]\n) -> AsyncGenerator[Redis, None]: # type: ignore\n \"\"\"\n Lazy generation of Redis dependency\n :param redis_dependency: The dependency injection on Redis\n :type redis_dependency: RedisDependency\n :return: The Redis connection instance as a generator\n :rtype: AsyncGenerator[Redis, None]\n \"\"\"\n async with redis_dependency as redis:\n yield redis" }, { "identifier": "get_current_user", "path": "app/api/oauth2_validation.py", "snippet": "async def get_current_user(\n token: Annotated[str, Depends(oauth2_scheme)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n user_service: Annotated[UserService, Depends(get_user_service)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserAuth:\n \"\"\"\n Fetches the current authenticated user based on the provided JWT\n access token\n :param token: The Access token from OAuth2PasswordBearer\n :type token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :param user_service: Dependency method for User service object\n :type user_service: UserService\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: Authenticated user information\n :rtype: UserAuth\n \"\"\"\n token_service: TokenService = TokenService(redis, auth_settings)\n is_blacklisted: bool = await token_service.is_token_blacklisted(token)\n if is_blacklisted:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Token is blacklisted\",\n )\n return await authenticate_user(token, auth_settings, user_service, redis)" }, { "identifier": "get_refresh_current_user", "path": "app/api/oauth2_validation.py", "snippet": "async def get_refresh_current_user(\n refresh_token: Annotated[str, Depends(refresh_token_scheme)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n user_service: Annotated[UserService, Depends(get_user_service)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserAuth:\n \"\"\"\n Fetches the current authenticated user based on the provided JWT\n refresh token\n :param refresh_token: The Refresh token from OAuth2PasswordBearer\n :type refresh_token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :param user_service: Dependency method for User service object\n :type user_service: UserService\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: Authenticated user information\n :rtype: UserAuth\n \"\"\"\n return await authenticate_user(\n refresh_token, auth_settings, user_service, redis\n )" }, { "identifier": "get_auth_settings", "path": "app/config/config.py", "snippet": "def get_init_settings() -> InitSettings:\ndef get_settings() -> Settings:\ndef get_sql_settings() -> SQLDatabaseSettings:\ndef get_auth_settings() -> AuthSettings:" }, { "identifier": "AuthSettings", "path": "app/config/db/auth_settings.py", "snippet": "class AuthSettings(BaseSettings):\n \"\"\"\n Settings class for authentication using JWT and Redis\n \"\"\"\n\n model_config = SettingsConfigDict(\n env_file=\".env\",\n env_file_encoding=\"utf-8\",\n case_sensitive=True,\n extra=\"allow\",\n )\n\n MAX_REQUESTS: PositiveInt = 30\n RATE_LIMIT_DURATION: PositiveInt = 60\n BLACKLIST_EXPIRATION_SECONDS: PositiveInt = 3600\n API_V1_STR: str = 
\"/api/v1\"\n ALGORITHM: str = \"HS256\"\n AUTH_URL: str = \"api/v1/auth/\"\n TOKEN_URL: str = \"api/v1/auth/login\"\n OAUTH2_SCHEME: str = \"JWT\"\n OAUTH2_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate most of\" \" the API endpoints.\"\n )\n OAUTH2_REFRESH_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate\" \" most ofhe API endpoints.\"\n )\n TOKEN_USER_INFO_REGEX: str = (\n r\"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-\"\n r\"[0-9a-f]{4}-[0-9a-f]{12}:\\d{1,3}\\.\"\n r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\"\n )\n SUB_REGEX: str = (\n r\"^username:[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-\"\n r\"[89ab][0-9a-f]{3}-[0-9a-f]{12}$\"\n )\n HEADERS: dict[str, str] = {\"WWW-Authenticate\": \"Bearer\"}\n DETAIL: str = \"Could not validate credentials\"\n NO_CLIENT_FOUND: str = \"No client found on the request\"\n SECRET_KEY: str\n SERVER_URL: AnyHttpUrl\n SERVER_DESCRIPTION: str\n CACHE_SECONDS: PositiveInt = 3600\n ACCESS_TOKEN_EXPIRE_MINUTES: float\n REFRESH_TOKEN_EXPIRE_MINUTES: PositiveInt\n EMAIL_RESET_TOKEN_EXPIRE_HOURS: PositiveInt\n AUDIENCE: Optional[AnyHttpUrl] = None\n STRICT_TRANSPORT_SECURITY_MAX_AGE: PositiveInt\n\n @field_validator(\"AUDIENCE\", mode=\"before\")\n def assemble_audience(\n cls, v: Optional[str], info: ValidationInfo\n ) -> AnyHttpUrl:\n \"\"\"\n Combine server host and API_V1_STR to create the audience\n string.\n :param v: The value of audience attribute\n :type v: Optional[str]\n :param info: The field validation info\n :type info: ValidationInfo\n :return: The AUDIENCE attribute\n :rtype: AnyHttpUrl\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return AnyHttpUrl(\n f'{str(info.data.get(\"SERVER_URL\"))[:-1]}:8000/'\n f'{info.data.get(\"TOKEN_URL\")}'\n )\n\n REDIS_SCHEME: str\n REDIS_HOST: str\n REDIS_USERNAME: str\n REDIS_PASSWORD: str\n REDIS_PORT: PositiveInt\n REDIS_DATABASE_URI: Optional[RedisDsn] = None\n\n @field_validator(\"REDIS_DATABASE_URI\", mode=\"before\")\n def assemble_redis_connection(\n cls, v: Optional[str], info: ValidationInfo\n ) -> RedisDsn:\n \"\"\"\n Assemble the cache database connection as URI string\n :param v: Variables to consider\n :type v: str\n :param info: The field validation info\n :type info: ValidationInfo\n :return: Redis URI\n :rtype: RedisDsn\n \"\"\"\n # pylint: disable=no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return RedisDsn(\n str(\n Url.build(\n scheme=info.data.get(\"REDIS_SCHEME\", \"\"),\n username=info.data.get(\"REDIS_USERNAME\"),\n password=info.data.get(\"REDIS_PASSWORD\"),\n host=info.data.get(\"REDIS_HOST\", \"\"),\n port=info.data.get(\"REDIS_PORT\"),\n )\n )\n )" }, { "identifier": "InitSettings", "path": "app/config/init_settings.py", "snippet": "class InitSettings(BaseSettings):\n \"\"\"\n Init Settings class based on Pydantic Base Settings\n \"\"\"\n\n model_config = SettingsConfigDict(\n case_sensitive=True,\n extra=\"allow\",\n )\n\n ITERATIONS: PositiveInt = 100000\n KEY_BYTES_LENGTH: PositiveInt = 32\n SALT_BYTES: PositiveInt = 16\n IV_BYTES: PositiveInt = 12\n PUBLIC_EXPONENT: PositiveInt = 65537\n RSA_KEY_BITS: PositiveInt = 2048\n SALUTE: str = \"Salute!\"\n ROOT_MSG: str = \"Hello, World!\"\n SERVER_NAME: str = \"FastAPI Boilerplate\"\n PROJECT_NAME: str = \"fastapi-boilerplate\"\n VERSION: str = \"1.0\"\n ENCODING: str = \"UTF-8\"\n DEFAULT_REGION: str = \"Guayas\"\n DEFAULT_COUNTRY: str = \"Ecuador\"\n 
OPENAPI_FILE_PATH: str = \"/openapi.json\"\n DATE_FORMAT: str = \"%Y-%m-%d\"\n DATETIME_FORMAT: str = \"%Y-%m-%d %H:%M:%S\"\n FILE_DATE_FORMAT: str = \"%d-%b-%Y-%H-%M-%S\"\n IMAGES_APP: str = \"images\"\n IMAGES_PATH: str = \"/assets/images\"\n IMAGES_DIRECTORY: str = \"assets/images\"\n EMAIL_TEMPLATES_DIR: str = \"templates\"\n PASSWORD_RECOVERY_SUBJECT: str = \"Password recovery for user\"\n NEW_ACCOUNT_SUBJECT: str = \"New account for user\"\n WELCOME_SUBJECT: str = \"Welcome to \"\n PASSWORD_CHANGED_CONFIRMATION_SUBJECT: str = (\n \"Successfully password \" \"changed for \"\n )\n DELETE_ACCOUNT_SUBJECT: str = \"Account deleted for \"\n LOG_FORMAT: str = (\n \"[%(name)s][%(asctime)s][%(levelname)s][%(module)s]\"\n \"[%(funcName)s][%(lineno)d]: %(message)s\"\n )\n PASSWORD_REGEX: str = (\n \"^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?\" \"[#?!@$%^&*-]).{8,14}$\"\n )\n\n SUMMARY: str = \"\"\"This backend project is FastAPI template.\n This project serves as the backend, which aims to provide a robust and\n reliable system to its users.\n This backend application plays a crucial role in providing the\n functionality for user authentication, real-time monitoring,\n data processing, and advanced alerting system. It is designed to ensure\n the scalability and maintainability of the mobile app,\n making it a vital part of the overall solution.\n \"\"\"\n DESCRIPTION: str = f\"\"\"**FastAPI**, **SQLAlchemy** and **Redis** helps you\n do awesome stuff. 🚀\n \\n\\n<img src=\"data:image/png;base64,{img_b64}\"/>\"\"\"\n LICENSE_INFO: dict[str, str] = {\n \"name\": \"MIT\",\n \"identifier\": \"MIT\",\n }\n TAGS_METADATA: list[dict[str, str]] = [\n {\n \"name\": \"user\",\n \"description\": f\"\"\"Operations with users, such as register, get,\n update and delete.\\n\\n<img src=\"data:image/png;base64,\n {users_b64}\" width=\"150\" height=\"100\"/>\"\"\",\n },\n {\n \"name\": \"auth\",\n \"description\": f\"\"\"The authentication logic is here as well as\n password recovery and reset.\n \\n\\n<img src=\"data:image/png;base64,{auth_b64}\" width=\"75\"\n height=\"75\"/>\"\"\",\n },\n ]\n USER_CREATE_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** user create object that works \"\n \"correctly.\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(\"+593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n },\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert phone number `strings` to \"\n \"actual `numbers` automatically\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(593987654321),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is 
rejected with an error\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"username\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"miclave123\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(95, 12, 31),\n \"phone_number\": PhoneNumber(\"5939876a4321\"),\n \"address\": {\n \"street_address\": \"True\",\n \"locality\": \"123\",\n \"region\": \"Andes\",\n \"country\": \"New York\",\n \"postal_code\": \"999999\",\n },\n },\n },\n }\n USER_UPDATE_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** user update object that works \"\n \"correctly.\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(593987654321),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n },\n },\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert phone numbers `strings` to \"\n \"actual `numbers` automatically\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(\"593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n },\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"username\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"miclave123\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(95, 12, 31),\n \"phone_number\": PhoneNumber(\"59398x54321\"),\n \"address\": {\n \"street_address\": \"True\",\n \"locality\": \"123\",\n },\n },\n },\n }\n EMAIL_BODY_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** email object that works correctly.\",\n \"value\": \"[email protected]\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": 123,\n },\n }\n TOKEN_PAYLOAD_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** token payload object that works \"\n \"correctly.\",\n \"value\": {\n \"token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"token\": \"123\",\n \"password\": \"abc123\",\n },\n },\n }\n AUTHORIZATION_HEADER_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** authorization token object that works \"\n \"correctly.\",\n \"value\": jwt.encode(\n claims=jsonable_encoder(\n {\n \"sub\": f\"username:{str(uuid4())}\",\n \"nationalities\": [\"ECU\"],\n \"email\": \"[email protected]\",\n \"nickname\": \"example\",\n \"preferred_username\": \"example\",\n \"given_name\": \"Some\",\n \"family_name\": \"Example\",\n \"middle_name\": \"One\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 
12, 31),\n \"updated_at\": datetime.now(),\n \"phone_number\": PhoneNumber(\"+593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n \"exp\": int(time.time()) + 1800,\n \"nbf\": int(time.time()) - 1,\n \"iat\": int(time.time()),\n }\n ),\n key=\"f52e826e62cdd364c86f129cb18db2fe2be93859c5104cac9585f\"\n \"305378dce65\",\n algorithm=\"HS256\",\n ),\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": \"123\",\n },\n }\n LIMIT_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** limit query parameter that works \"\n \"correctly.\",\n \"value\": 1,\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert limit `strings` to actual\"\n \" `numbers` automatically\",\n \"value\": \"5\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": -1,\n },\n }\n SKIP_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** skip query parameter that works \"\n \"correctly.\",\n \"value\": 0,\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert skip `strings` to actual\"\n \" `numbers` automatically\",\n \"value\": \"20\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": -1,\n },\n }" }, { "identifier": "Settings", "path": "app/config/settings.py", "snippet": "class Settings(BaseSettings):\n \"\"\"\n Settings class based on Pydantic Base Settings\n \"\"\"\n\n model_config = SettingsConfigDict(\n env_file=\".env\",\n env_file_encoding=\"utf-8\",\n case_sensitive=True,\n extra=\"allow\",\n )\n\n SMTP_PORT: PositiveInt\n SMTP_HOST: str\n SMTP_USER: str\n SMTP_PASSWORD: str\n MAIL_SUBJECT: str\n MAIL_TIMEOUT: float\n EMAILS_FROM_EMAIL: Optional[EmailStr] = None\n EMAILS_FROM_NAME: Optional[str] = None\n SUPERUSER_EMAIL: EmailStr\n SUPERUSER_FIRST_NAME: str\n SUPERUSER_PASSWORD: str\n SUPERUSER_LAST_NAME: str\n SUPERUSER_STREET_ADDRESS: str\n SUPERUSER_LOCALITY: str\n SUPERUSER_POSTAL_CODE: str\n BACKEND_CORS_ORIGINS: list[AnyHttpUrl] = []\n\n PUBLIC_KEY_PATH: FilePath\n PRIVATE_KEY_PATH: FilePath\n\n @field_validator(\n \"PUBLIC_KEY_PATH\", \"PRIVATE_KEY_PATH\", mode=\"before\", check_fields=True\n )\n def validate_key_paths(cls, key_path: FilePath) -> FilePath:\n \"\"\"\n Validate the provided key path.\n :param key_path: Provided key path\n :type key_path: FilePath\n :return: The validated key path\n :rtype: FilePath\n \"\"\"\n if not str(key_path).endswith(\".pem\"):\n raise ValueError(f\"{key_path} must have a .pem extension\")\n base_name: str = os.path.basename(key_path)\n if not base_name.endswith(\"key.pem\"):\n raise ValueError(\n f\"{key_path} must have a file name ending with 'key'\"\n )\n return key_path\n\n @field_validator(\"BACKEND_CORS_ORIGINS\", mode=\"before\")\n def assemble_cors_origins(\n cls, v: Union[str, list[str]]\n ) -> Union[list[str], str]:\n \"\"\"\n Assemble a list of allowed CORS origins.\n :param v: Provided CORS origins, either a string or a list of\n strings\n :type v: Union[str, list[str]]\n :return: List of Backend CORS origins to be accepted\n :rtype: Union[list[str], str]\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if 
isinstance(v, str) and not v.startswith(\"[\"):\n return [i.strip() for i in v.split(\",\")]\n if isinstance(v, list):\n return v\n raise ValueError(v)\n\n CONTACT_NAME: Optional[str] = None\n CONTACT_URL: Optional[AnyHttpUrl] = None\n CONTACT_EMAIL: Optional[EmailStr] = None\n CONTACT: Optional[dict[str, Any]] = None\n\n @field_validator(\"CONTACT\", mode=\"before\")\n def assemble_contact(\n cls, v: Optional[str], info: ValidationInfo\n ) -> dict[str, str]:\n \"\"\"\n Assemble contact information\n :param v: Variables to consider\n :type v: str\n :param info: The field validation info\n :type info: ValidationInfo\n :return: The contact attribute\n :rtype: dict[str, str]\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n contact: dict[str, Any] = {}\n if info.data.get(\"CONTACT_NAME\"):\n contact[\"name\"] = info.data.get(\"CONTACT_NAME\")\n if info.data.get(\"CONTACT_URL\"):\n contact[\"url\"] = info.data.get(\"CONTACT_URL\")\n if info.data.get(\"CONTACT_EMAIL\"):\n contact[\"email\"] = info.data.get(\"CONTACT_EMAIL\")\n return contact" }, { "identifier": "verify_password", "path": "app/core/security/password.py", "snippet": "def verify_password(hashed_password: str, plain_password: str) -> bool:\n \"\"\"\n Verifies if a plain text password matches a hashed password\n :param plain_password: The plain text password to verify\n :type plain_password: str\n :param hashed_password: The hashed password to compare against\n :type hashed_password: str\n :return: True if the passwords match, False otherwise\n :rtype: bool\n \"\"\"\n if not plain_password:\n raise_custom_error(\"Plain password cannot be empty or None\")\n if not hashed_password:\n raise_custom_error(\"Hashed password cannot be empty or None\")\n return crypt_context.verify(plain_password, hashed_password)" }, { "identifier": "NotFoundException", "path": "app/exceptions/exceptions.py", "snippet": "class NotFoundException(Exception):\n \"\"\"\n Not Found Exception class\n \"\"\"\n\n def __init__(self, message: str, note: Optional[str] = None):\n super().__init__(message)\n if note:\n self.add_note(note)" }, { "identifier": "ServiceException", "path": "app/exceptions/exceptions.py", "snippet": "class ServiceException(Exception):\n \"\"\"\n Service Layer Exception class\n \"\"\"\n\n def __init__(self, message: str, note: Optional[str] = None):\n super().__init__(message)\n if note:\n self.add_note(note)" }, { "identifier": "User", "path": "app/models/sql/user.py", "snippet": "class User(Base): # type: ignore\n \"\"\"\n User model class representing the \"users\" table\n \"\"\"\n\n __tablename__ = \"users\"\n\n id: Mapped[UUID4] = mapped_column(\n UUID(as_uuid=True),\n index=True,\n nullable=False,\n primary_key=True,\n unique=True,\n server_default=text(\"(gen_random_uuid())\"),\n comment=\"ID of the User\",\n )\n username: Mapped[str] = mapped_column(\n String(15),\n index=True,\n unique=True,\n nullable=False,\n comment=\"Username to identify the user\",\n )\n email: Mapped[EmailStr] = mapped_column(\n String(320),\n index=True,\n unique=True,\n nullable=False,\n comment=\"Preferred e-mail address of the User\",\n )\n first_name: Mapped[str] = mapped_column(\n String(50), nullable=False, comment=\"First name(s) of the User\"\n )\n middle_name: Mapped[str] = mapped_column(\n String(50), nullable=True, comment=\"Middle name(s) of the User\"\n )\n last_name: Mapped[str] = mapped_column(\n String(100), nullable=False, comment=\"Last 
name(s) of the User\"\n )\n password: Mapped[str] = mapped_column(\n String(60), nullable=False, comment=\"Hashed password of the User\"\n )\n gender: Mapped[Gender] = mapped_column(\n Enum(Gender), nullable=True, comment=\"Gender of the User\"\n )\n birthdate: Mapped[PastDate] = mapped_column(\n Date, nullable=True, comment=\"Birthday of the User\"\n )\n phone_number: Mapped[PhoneNumber] = mapped_column(\n String(20),\n nullable=True,\n comment=\"Preferred telephone number of the User\",\n )\n is_active: Mapped[bool] = mapped_column(\n Boolean(),\n default=True,\n nullable=False,\n server_default=text(\"true\"),\n comment=\"True if the user is active; otherwise false\",\n )\n is_superuser: Mapped[bool] = mapped_column(\n Boolean(),\n default=False,\n nullable=False,\n server_default=text(\"false\"),\n comment=\"True if the user is super user; otherwise false\",\n )\n created_at: Mapped[datetime] = mapped_column(\n TIMESTAMP(\n timezone=True, precision=sql_database_setting.TIMESTAMP_PRECISION\n ),\n default=datetime.now(),\n nullable=False,\n server_default=text(\"now()\"),\n comment=\"Time the User was created\",\n )\n updated_at: Mapped[datetime] = mapped_column(\n TIMESTAMP(\n timezone=True, precision=sql_database_setting.TIMESTAMP_PRECISION\n ),\n nullable=True,\n onupdate=text(\"now()\"),\n comment=\"Time the User was updated\",\n )\n address_id: Mapped[UUID4] = mapped_column(\n UUID(as_uuid=True),\n ForeignKey(\n \"users_address.id\",\n name=\"users_address_id_fkey\",\n ),\n nullable=False,\n comment=\"ID of the User's address\",\n )\n address: Mapped[\"Address\"] = relationship( # type: ignore\n \"Address\", back_populates=\"users\", lazy=\"joined\"\n )\n\n __table_args__ = (\n CheckConstraint(\n \"char_length(username) >= 4\", name=\"users_username_length\"\n ),\n CheckConstraint(\"char_length(email) >= 3\", name=\"users_email_length\"),\n CheckConstraint(\n sql_database_setting.DB_EMAIL_CONSTRAINT, name=\"users_email_format\"\n ),\n CheckConstraint(\n \"char_length(first_name) >= 1\", name=\"users_first_name_length\"\n ),\n CheckConstraint(\n \"char_length(last_name) >= 1\", name=\"users_last_name_length\"\n ),\n CheckConstraint(\"LENGTH(password) = 60\", name=\"users_password_length\"),\n CheckConstraint(\n sql_database_setting.DB_PHONE_NUMBER_CONSTRAINT,\n name=\"users_phone_number_format\",\n ),\n )" }, { "identifier": "Msg", "path": "app/schemas/external/msg.py", "snippet": "class Msg(BaseModel):\n \"\"\"\n Schema for representing a message.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra={\"example\": {\"msg\": \"Hello, World!!!\"}}\n )\n\n msg: str = Field(..., title=\"Message\", description=\"Message to display\")" }, { "identifier": "TokenResetPassword", "path": "app/schemas/external/token.py", "snippet": "class TokenResetPassword(BaseModel):\n \"\"\"\n Token Reset Password for Request based on Pydantic Base Model.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra={\n \"example\": {\n \"token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n }\n }\n )\n\n token: str = Field(\n ..., title=\"Token\", description=\"Access token\", min_length=30\n )\n password: str = Field(\n ...,\n title=\"New password\",\n description=\"New password to reset\",\n validate_default=True,\n min_length=8,\n max_length=14,\n )\n\n @field_validator(\"password\", mode=\"before\")\n def validate_password(cls, v: Optional[str]) -> str:\n \"\"\"\n Validates the password attribute\n :param v: The password to be validated\n :type v: Optional[str]\n 
:return: The validated password\n :rtype: str\n \"\"\"\n # pylint: disable=no-self-argument\n return validate_password(v)" }, { "identifier": "TokenResponse", "path": "app/schemas/external/token.py", "snippet": "class TokenResponse(Token):\n \"\"\"\n Token for Response based on Pydantic Base Model.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra=token_response_example,\n )\n\n token_type: str = Field(\n default=\"bearer\", title=\"Token type\", description=\"Type of the token\"\n )" }, { "identifier": "UserResponse", "path": "app/schemas/external/user.py", "snippet": "class UserResponse(UserID, UserBase, UserOptional, UserInDB):\n \"\"\"\n Schema for the response when retrieving a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_response_example,\n )" }, { "identifier": "UserUpdate", "path": "app/schemas/external/user.py", "snippet": "class UserUpdate(BaseModel):\n \"\"\"\n Schema for updating a User record.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_update_example,\n )\n\n username: Optional[str] = Field(\n default=None,\n title=\"Username\",\n description=\"Username to identify the user\",\n min_length=4,\n max_length=15,\n )\n email: Optional[EmailStr] = Field(\n default=None,\n title=\"Email\",\n description=\"Preferred e-mail address of the User\",\n )\n first_name: Optional[str] = Field(\n default=None,\n title=\"First name\",\n description=\"First name(s) of the User\",\n min_length=1,\n max_length=50,\n )\n middle_name: Optional[str] = Field(\n default=None,\n title=\"Middle Name\",\n description=\"Middle name(s) of the User\",\n max_length=50,\n )\n last_name: Optional[str] = Field(\n default=None,\n title=\"Last name\",\n description=\"Last name(s) of the User\",\n min_length=1,\n max_length=100,\n )\n password: Optional[str] = Field(\n default=None,\n title=\"New Password\",\n description=\"New Password of the User\",\n min_length=8,\n max_length=14,\n )\n gender: Optional[Gender] = Field(\n default=None, title=\"Gender\", description=\"Gender of the User\"\n )\n birthdate: Optional[date] = Field(\n default=None, title=\"Birthdate\", description=\"Birthday of the User\"\n )\n phone_number: Optional[PhoneNumber] = Field(\n default=None,\n title=\"Phone number\",\n description=\"Preferred telephone number of the User\",\n )\n address: Optional[Address] = Field(\n default=None, title=\"Address\", description=\"Address of the User\"\n )\n\n @field_validator(\"password\", mode=\"before\")\n def validate_password(cls, v: Optional[str]) -> str:\n \"\"\"\n Validates the password attribute\n :param v: The password value to validate\n :type v: Optional[str]\n :return: The validated password\n :rtype: str\n \"\"\"\n # pylint: disable=no-self-argument\n return validate_password(v)" }, { "identifier": "UserUpdateResponse", "path": "app/schemas/external/user.py", "snippet": "class UserUpdateResponse(\n UserAuth, UserName, UserPassword, UserOptional, UserInDB\n):\n \"\"\"\n Schema for the response when updating a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_update_response_example,\n )" }, { "identifier": "UserAuth", "path": "app/schemas/infrastructure/user.py", "snippet": "class UserAuth(UserID, UserBaseAuth):\n \"\"\"\n User Auth that inherits from UserID.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_auth_example,\n )" }, { "identifier": "common_auth_procedure", "path": 
"app/services/infrastructure/auth.py", "snippet": "async def common_auth_procedure(\n user: User,\n client_ip: str,\n redis: Redis, # type: ignore\n auth_settings: AuthSettings,\n) -> TokenResponse:\n \"\"\"\n Common authentication procedure for login and refresh token based on\n token generation\n :param user: The user to authenticate\n :type user: User\n :param client_ip: The IP address of the client\n :type client_ip: str\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: The token response object\n :rtype: TokenResponse\n \"\"\"\n auth_token = AuthService.auth_token(user, auth_settings)\n user_info = f\"{str(user.id)}:{client_ip}\"\n token = TokenDB(key=auth_token.refresh_token, user_info=user_info)\n token_service = TokenService(redis, auth_settings)\n token_set = await token_service.create_token(token)\n if not token_set:\n detail = \"Could not insert data in Authentication database\"\n logger.warning(detail)\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=detail\n )\n return TokenResponse(**auth_token.model_dump())" }, { "identifier": "TokenService", "path": "app/services/infrastructure/token.py", "snippet": "class TokenService:\n \"\"\"\n Service class for token operations in the authentication database\n \"\"\"\n\n def __init__(\n self,\n redis: Redis, # type: ignore\n auth_settings: AuthSettings,\n ):\n self._redis: Redis = redis # type: ignore\n self._refresh_token_expire_minutes: (\n PositiveInt\n ) = auth_settings.REFRESH_TOKEN_EXPIRE_MINUTES\n self._blacklist_expiration_seconds: PositiveInt = (\n PositiveInt(\n PositiveInt(auth_settings.ACCESS_TOKEN_EXPIRE_MINUTES) + 1\n )\n * 60\n ) # converting minutes to seconds\n\n @handle_redis_exceptions\n @benchmark\n async def create_token(self, token: Token) -> bool:\n \"\"\"\n Create a token in authentication database\n :param token: Token object with key and value\n :type token: Token\n :return: True if the token was inserted; otherwise false\n :rtype: bool\n \"\"\"\n try:\n inserted: bool = await self._redis.setex(\n token.key,\n self._refresh_token_expire_minutes,\n token.user_info,\n )\n except RedisError as r_exc:\n logger.error(\"Error at creating token. %s\", r_exc)\n raise r_exc\n return inserted\n\n @handle_redis_exceptions\n @benchmark\n async def get_token(self, key: str) -> Optional[str]:\n \"\"\"\n Read token from the authentication database\n :param key: The key to search for\n :type key: str\n :return: The refresh token\n :rtype: str\n \"\"\"\n try:\n value: str = str(await self._redis.get(key))\n except RedisError as r_exc:\n logger.error(\"Error at getting token. %s\", r_exc)\n raise r_exc\n return value\n\n @handle_redis_exceptions\n @benchmark\n async def blacklist_token(self, token_key: str) -> bool:\n \"\"\"\n Blacklist a given token.\n :param token_key: The token key to blacklist.\n :type token_key: str\n :return: True if the token was successfully blacklisted,\n otherwise False.\n :rtype: bool\n \"\"\"\n try:\n blacklisted: bool = await self._redis.setex(\n f\"blacklist:{token_key}\",\n self._blacklist_expiration_seconds,\n \"true\",\n )\n except RedisError as r_exc:\n logger.error(\"Error at blacklisting token. 
%s\", r_exc)\n raise r_exc\n return blacklisted\n\n @handle_redis_exceptions\n @benchmark\n async def is_token_blacklisted(self, token_key: str) -> bool:\n \"\"\"\n Check if a given token is blacklisted.\n :param token_key: The token key to verify.\n :type token_key: str\n :return: True if the token is blacklisted, otherwise False.\n :rtype: bool\n \"\"\"\n try:\n blacklisted: Optional[str] = await self._redis.get(\n f\"blacklist\" f\":{token_key}\"\n )\n except RedisError as r_exc:\n logger.error(\"Error at checking if token is blacklisted. %s\", r_exc)\n raise r_exc\n return bool(blacklisted)" }, { "identifier": "UserService", "path": "app/services/infrastructure/user.py", "snippet": "class UserService:\n \"\"\"\n Service class for user-related business logic.\n \"\"\"\n\n def __init__(\n self,\n user_repo: UserRepository,\n redis: Redis, # type: ignore\n ):\n self._user_repo: UserRepository = user_repo\n self._redis: Redis = redis # type: ignore\n self._cache_seconds: PositiveInt = auth_setting.CACHE_SECONDS\n\n async def get_user_by_id(self, user_id: UUID4) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its unique identifier\n :param user_id: The unique identifier of the user\n :type user_id: UUID4\n :return: User information\n :rtype: Optional[UserResponse]\n \"\"\"\n user: Optional[User]\n try:\n user = await self._user_repo.read_by_id(IdSpecification(user_id))\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n detail: str = f\"User with id {user_id} not found in the system.\"\n logger.error(detail)\n raise NotFoundException(detail)\n user_response: Optional[\n UserResponse\n ] = await model_to_response( # type: ignore\n user, UserResponse\n )\n return user_response\n\n async def get_login_user(self, username: str) -> User:\n \"\"\"\n Retrieve user information for login purposes by its username\n :param username: The username to retrieve User from\n :type username: str\n :return: User information\n :rtype: User\n \"\"\"\n try:\n user: Optional[User] = await self._user_repo.read_by_username(\n UsernameSpecification(username)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n raise ServiceException(f\"User not found with username: {username}\")\n return user\n\n async def get_user_by_username(\n self, username: str\n ) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its username\n :param username: The username to retrieve User from\n :type username: str\n :return: User information\n :rtype: UserResponse\n \"\"\"\n try:\n user: User = await self.get_login_user(username)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n return await model_to_response(user, UserResponse) # type: ignore\n\n async def get_user_by_email(\n self, email: EmailStr\n ) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its unique email.\n :param email: The email to retrieve User from\n :type email: EmailStr\n :return: User found in database\n :rtype: Optional[UserResponse]\n \"\"\"\n try:\n user: Optional[User] = await self._user_repo.read_by_email(\n EmailSpecification(email)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n raise ServiceException(f\"User not found with email: {email}\")\n return await model_to_response(user, UserResponse) 
# type: ignore\n\n async def get_user_id_by_email(self, email: EmailStr) -> UUID4:\n \"\"\"\n Read the user ID from the database with unique email.\n :param email: Email to retrieve User from\n :type email: EmailStr\n :return: User ID found in database\n :rtype: UUID4\n \"\"\"\n try:\n user_id: Optional[UUID4] = await self._user_repo.read_id_by_email(\n EmailSpecification(email)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user_id:\n raise ServiceException(f\"User ID not found with email: {email}\")\n return user_id\n\n async def register_user(\n self, user: Union[UserCreate, UserSuperCreate]\n ) -> Optional[UserCreateResponse]:\n \"\"\"\n Register a new user in the database\n :param user: Request object representing the user\n :type user: Union[UserCreate, UserSuperCreate]\n :return: Response object representing the created user in the\n database\n :rtype: UserCreateResponse\n \"\"\"\n try:\n created_user = await self._user_repo.create_user(user)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n return await model_to_response(\n created_user, UserCreateResponse # type: ignore\n )\n\n async def get_users(\n self, offset: Optional[NonNegativeInt], limit: Optional[PositiveInt]\n ) -> list[UserResponse]:\n \"\"\"\n Retrieve users' information from the table\n :param offset: Offset from where to start returning users\n :type offset: NonNegativeInt\n :param limit: Limit the number of results from query\n :type limit: PositiveInt\n :return: User information\n :rtype: list[UserResponse]\n \"\"\"\n try:\n users: list[User] = await self._user_repo.read_users(offset, limit)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n found_users: list[UserResponse] = [\n await model_to_response(user, UserResponse) # type: ignore\n for user in users\n ]\n return found_users\n\n async def update_user(\n self, user_id: UUID4, user: UserUpdate\n ) -> Optional[UserUpdateResponse]:\n \"\"\"\n Update user information from table\n :param user_id: Unique identifier of the user\n :type user_id: UUID4\n :param user: Requested user information to update\n :type user: UserUpdate\n :return: User information\n :rtype: Optional[UserUpdateResponse]\n \"\"\"\n try:\n updated_user: Optional[User] = await self._user_repo.update_user(\n IdSpecification(user_id), user\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not updated_user:\n raise ServiceException(\n f\"User with user_id: {user_id} could not be updated\"\n )\n return await model_to_response(\n updated_user, UserUpdateResponse # type: ignore\n )\n\n async def delete_user(self, user_id: UUID4) -> dict[str, Any]:\n \"\"\"\n Deletes a user by its id\n :param user_id: Unique identifier of the user\n :type user_id: UUID4\n :return: Data to confirmation info about the delete process\n :rtype: dict[str, Any]\n \"\"\"\n deleted: bool\n deleted_at: Optional[datetime]\n try:\n deleted = await self._user_repo.delete_user(\n IdSpecification(user_id)\n )\n deleted_at = datetime.now()\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n deleted = False\n deleted_at = None\n finally:\n return {\"ok\": deleted, \"deleted_at\": deleted_at}" }, { "identifier": "get_user_service", "path": "app/services/infrastructure/user.py", "snippet": "async def get_user_service(\n user_repo: 
Annotated[UserRepository, Depends(get_user_repository)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserService:\n \"\"\"\n Get an instance of the user service with the given repository.\n :param user_repo: User repository object for database connection\n :type user_repo: UserRepository\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: UserService instance with repository associated\n :rtype: UserService\n \"\"\"\n return UserService(user_repo, redis)" }, { "identifier": "send_password_changed_confirmation_email", "path": "app/tasks/email_tasks/email_tasks.py", "snippet": "@with_logging\nasync def send_password_changed_confirmation_email(\n email_to: EmailStr,\n username: str,\n init_settings: Annotated[InitSettings, Depends(get_init_settings)],\n settings: Annotated[Settings, Depends(get_settings)],\n) -> bool:\n \"\"\"\n Send a password changed confirmation email\n :param email_to: The email address of the recipient with password\n changed\n :type email_to: EmailStr\n :param username: Username of the recipient\n :type username: str\n :param init_settings: Dependency method for cached init setting object\n :type init_settings: InitSettings\n :param settings: Dependency method for cached setting object\n :type settings: Settings\n :return: True if the email was sent; otherwise false\n :rtype: bool\n \"\"\"\n subject: str = (\n f\"{init_settings.PASSWORD_CHANGED_CONFIRMATION_SUBJECT}\" f\" {username}\"\n )\n template_str: str = await build_email_template(\n \"password_changed_confirmation.html\", init_settings\n )\n is_sent: bool = await send_email(\n email_to=email_to,\n subject_template=subject,\n html_template=template_str,\n environment={\n \"project_name\": init_settings.PROJECT_NAME,\n \"username\": username,\n \"email\": email_to,\n \"link\": f\"mailto:{settings.CONTACT_EMAIL}?subject=\"\n f\"{init_settings.PROJECT_NAME} password changed\",\n },\n settings=settings,\n )\n return is_sent" }, { "identifier": "send_reset_password_email", "path": "app/tasks/email_tasks/email_tasks.py", "snippet": "@with_logging\nasync def send_reset_password_email(\n email_to: EmailStr,\n username: str,\n token: str,\n settings: Annotated[Settings, Depends(get_settings)],\n init_settings: Annotated[InitSettings, Depends(get_init_settings)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> bool:\n \"\"\"\n Sends a password reset email to a user with the given email address\n :param email_to: The email address of the user\n :type email_to: EmailStr\n :param username: The username of the user\n :type username: str\n :param token: The reset password token generated for the user\n :type token: str\n :param settings: Dependency method for cached setting object\n :type settings: Settings\n :param init_settings: Dependency method for cached init setting object\n :type init_settings: InitSettings\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: True if the email was sent successfully; False otherwise\n :rtype: bool\n \"\"\"\n subject: str = (\n f\"{init_settings.PROJECT_NAME} -\"\n f\" {init_settings.PASSWORD_RECOVERY_SUBJECT} {username}\"\n )\n template_str: str = await build_email_template(\n \"reset_password.html\", init_settings\n )\n link: str = (\n f\"{auth_settings.SERVER_URL}\"\n f\"{auth_settings.AUTH_URL}reset-password?token={token}\"\n )\n is_sent: bool = await send_email(\n email_to=email_to,\n subject_template=subject,\n 
html_template=template_str,\n environment={\n \"project_name\": init_settings.PROJECT_NAME,\n \"username\": username,\n \"email\": email_to,\n \"valid_hours\": auth_settings.EMAIL_RESET_TOKEN_EXPIRE_HOURS,\n \"link\": link,\n },\n settings=settings,\n )\n return is_sent" }, { "identifier": "generate_password_reset_token", "path": "app/utils/security/password.py", "snippet": "def generate_password_reset_token(\n email: EmailStr,\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> str:\n \"\"\"\n Generate a password reset token for the given email address.\n :param email: The email to generate the reset token for\n :type email: EmailStr\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: The password reset token\n :rtype: str\n \"\"\"\n payload: dict[str, Any] = generate_password_reset_payload(\n email, auth_settings\n )\n return encode_jwt(payload, auth_settings)" }, { "identifier": "verify_password_reset_token", "path": "app/utils/security/password.py", "snippet": "def verify_password_reset_token(\n token: str,\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> Optional[EmailStr]:\n \"\"\"\n Verify a password reset token and return the email address if valid.\n :param token: The JSON Web Token\n :type token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: The email address\n :rtype: EmailStr\n \"\"\"\n decoded_token: Optional[dict[str, Any]] = decode_jwt(token, auth_settings)\n return decoded_token.get(\"sub\") if decoded_token else None" } ]
import logging
from typing import Annotated, Any, Optional
from fastapi import (
    APIRouter,
    Body,
    Depends,
    Header,
    HTTPException,
    Path,
    Request,
    status,
)
from fastapi.security import OAuth2PasswordRequestForm
from pydantic import EmailStr
from redis.asyncio import Redis
from starlette.datastructures import Address
from app.api.deps import get_redis_dep
from app.api.oauth2_validation import get_current_user, get_refresh_current_user
from app.config.config import (
    get_auth_settings,
    get_init_settings,
    get_settings,
    init_setting,
)
from app.config.db.auth_settings import AuthSettings
from app.config.init_settings import InitSettings
from app.config.settings import Settings
from app.core.security.password import verify_password
from app.exceptions.exceptions import NotFoundException, ServiceException
from app.models.sql.user import User as UserDB
from app.schemas.external.msg import Msg
from app.schemas.external.token import TokenResetPassword, TokenResponse
from app.schemas.external.user import (
    UserResponse,
    UserUpdate,
    UserUpdateResponse,
)
from app.schemas.infrastructure.user import UserAuth
from app.services.infrastructure.auth import common_auth_procedure
from app.services.infrastructure.token import TokenService
from app.services.infrastructure.user import UserService, get_user_service
from app.tasks.email_tasks.email_tasks import (
    send_password_changed_confirmation_email,
    send_reset_password_email,
)
from app.utils.security.password import (
    generate_password_reset_token,
    verify_password_reset_token,
)
13,839
    request: Request,
    auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],
    user: Annotated[OAuth2PasswordRequestForm, Depends()],
    user_service: Annotated[UserService, Depends(get_user_service)],
    redis: Annotated[Redis, Depends(get_redis_dep)],  # type: ignore
) -> TokenResponse:
    """
    Endpoint to handle user login with OAuth2 authentication using request form.
    ## Parameter:
    - `user:` **Request body with username and password**
    - `type:` **OAuth2PasswordRequestForm**
    ## Response:
    - `return:` **Token information with access token, its type and refresh token**
    - `rtype:` **TokenResponse**
    \f
    :param request: Request object for client host information
    :type request: Request
    :param user_service: Dependency method for User Service
    :type user_service: UserService
    :param auth_settings: Dependency method for cached setting object
    :type auth_settings: AuthSettings
    :param redis: Dependency method for async Redis connection
    :type redis: Redis
    """
    client: Optional[Address] = request.client
    if not client:
        raise NotFoundException(auth_settings.NO_CLIENT_FOUND)
    client_ip: str = client.host
    try:
        found_user: UserDB = await user_service.get_login_user(user.username)
    except ServiceException as exc:
        logger.error(exc)
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail="Invalid credentials"
        ) from exc
    if not verify_password(found_user.password, user.password):
        detail: str = "Incorrect password"
        logger.warning(detail)
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail=detail
        )
    if not found_user.is_active:
        user_detail: str = "Inactive user"
        logger.warning(user_detail)
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST, detail=user_detail
        )
    return await common_auth_procedure(
        found_user, client_ip, redis, auth_settings
    )


@router.post(
    "/refresh",
    response_model=TokenResponse,
    status_code=status.HTTP_201_CREATED,
)
async def refresh_token(
    request: Request,
    user_service: Annotated[UserService, Depends(get_user_service)],
    auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],
    refresh_current_user: Annotated[
        UserAuth, Depends(get_refresh_current_user)
    ],
    redis: Annotated[Redis, Depends(get_redis_dep)],  # type: ignore
) -> TokenResponse:
    """
    Generates a refresh token for the current user and saves it to the database
    ## Response:
    - `return:` **Token information with access token, its type and refresh token**
    - `rtype:` **TokenResponse**
    \f
    :param request: The HTTP request on the server
    :type request: Request
    :param user_service: Dependency method for User Service
    :type user_service: UserService
    :param auth_settings: Dependency method for cached setting object
    :type auth_settings: AuthSettings
    :param refresh_current_user: The current user dependency for refresh token
    :type refresh_current_user: UserAuth
    :param redis: Dependency method for async Redis connection
    :type redis: Redis
    """
    client: Optional[Address]
    if not (client := request.client):
        raise NotFoundException(auth_settings.NO_CLIENT_FOUND)
    client_ip: str = client.host
    try:
        user: UserDB = await user_service.get_login_user(
            refresh_current_user.username
        )
    except ServiceException as exc:
        detail: str = "Can not found user information."
        logger.error(detail)
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail=detail
        ) from exc
    return await common_auth_procedure(user, client_ip, redis, auth_settings)


@router.post("/validate-token", response_model=UserAuth)
async def validate_token(
    current_user: Annotated[UserAuth, Depends(get_current_user)]
) -> UserAuth:
    """
    Endpoint to validate an access token.
    ## Response:
    - `return:` **The authenticated user instance**
    - `rtype:` **UserAuth**
    \f
    :param current_user: The current user
    :type current_user: UserAuth
    """
    return current_user
""" Authentication API Router. This module provides login and password recovery functionality. """ logger: logging.Logger = logging.getLogger(__name__) router: APIRouter = APIRouter(prefix="/auth", tags=["auth"]) @router.post("/login", response_model=TokenResponse) async def login( request: Request, auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user: Annotated[OAuth2PasswordRequestForm, Depends()], user_service: Annotated[UserService, Depends(get_user_service)], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Endpoint to handle user login with OAuth2 authentication using request form. ## Parameter: - `user:` **Request body with username and password** - `type:` **OAuth2PasswordRequestForm** ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: Request object for client host information :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] = request.client if not client: raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: found_user: UserDB = await user_service.get_login_user(user.username) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Invalid credentials" ) from exc if not verify_password(found_user.password, user.password): detail: str = "Incorrect password" logger.warning(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) if not found_user.is_active: user_detail: str = "Inactive user" logger.warning(user_detail) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=user_detail ) return await common_auth_procedure( found_user, client_ip, redis, auth_settings ) @router.post( "/refresh", response_model=TokenResponse, status_code=status.HTTP_201_CREATED, ) async def refresh_token( request: Request, user_service: Annotated[UserService, Depends(get_user_service)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], refresh_current_user: Annotated[ UserAuth, Depends(get_refresh_current_user) ], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Generates a refresh token for the current user and saves it to the database ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: The HTTP request on the server :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param refresh_current_user: The current user dependency for refresh token :type refresh_current_user: UserAuth :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] if not (client := request.client): raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: user: UserDB = await user_service.get_login_user( refresh_current_user.username ) except ServiceException as exc: detail: str = "Can not found user information." 
logger.error(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) from exc return await common_auth_procedure(user, client_ip, redis, auth_settings) @router.post("/validate-token", response_model=UserAuth) async def validate_token( current_user: Annotated[UserAuth, Depends(get_current_user)] ) -> UserAuth: """ Endpoint to validate an access token. ## Response: - `return:` **The authenticated user instance** - `rtype:` **UserAuth** \f :param current_user: The current user :type current_user: UserAuth """ return current_user
@router.post("/recover-password/{email}", response_model=Msg)
11
2023-11-17 00:32:32+00:00
16k
vitant-lang/CBAM-ASPP
train.py
[ { "identifier": "DeepLab", "path": "nets/deeplabv3_plus.py", "snippet": "class DeepLab(nn.Module):\n\tdef __init__(self, num_classes, backbone=\"mobilenet\", pretrained=True, downsample_factor=16):\n\t\tsuper(DeepLab, self).__init__()\n\t\tif backbone==\"xception\":\n\t\t\t#----------------------------------#\n\t\t\t# 获得两个特征层\n\t\t\t# 浅层特征 [128,128,256]\n\t\t\t# 主干部分 [30,30,2048]\n\t\t\t#----------------------------------#\n\t\t\tself.backbone = xception(downsample_factor=downsample_factor, pretrained=pretrained)\n\t\t\tin_channels = 2048\n\t\t\tlow_level_channels = 256\n\t\telif backbone==\"mobilenet\":\n\t\t\t#----------------------------------#\n\t\t\t# 获得两个特征层\n\t\t\t# 浅层特征 [128,128,24]\n\t\t\t# 主干部分 [30,30,320]\n\t\t\t#----------------------------------#\n\t\t\tself.backbone = MobileNetV2(downsample_factor=downsample_factor, pretrained=pretrained)\n\t\t\tin_channels = 320\n\t\t\tlow_level_channels = 24\n\t\telse:\n\t\t\traise ValueError('Unsupported backbone - `{}`, Use mobilenet, xception.'.format(backbone))\n\n\t\t#-----------------------------------------#\n\t\t# ASPP特征提取模块\n\t\t# 利用不同膨胀率的膨胀卷积进行特征提取\n\t\t#-----------------------------------------#\n\t\tself.aspp = ASPP(dim_in=in_channels, dim_out=256, rate=16//downsample_factor)\n\n\t\t#----------------------------------#\n\t\t# 浅层特征边\n\t\t#----------------------------------#\n\t\tself.shortcut_conv = nn.Sequential(\n\t\t\tnn.Conv2d(low_level_channels, 48, 1),\n\t\t\tnn.BatchNorm2d(48),\n\t\t\tnn.ReLU(inplace=True)\n\t\t)\n\n\t\tself.cat_conv = nn.Sequential(\n\t\t\tnn.Conv2d(48+256, 256, 3, stride=1, padding=1),\n\t\t\tnn.BatchNorm2d(256),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.Dropout(0.5),\n\n\t\t\tnn.Conv2d(256, 256, 3, stride=1, padding=1),\n\t\t\tnn.BatchNorm2d(256),\n\t\t\tnn.ReLU(inplace=True),\n\n\t\t\tnn.Dropout(0.1),\n\t\t)\n\t\tself.cls_conv = nn.Conv2d(256, num_classes, 1, stride=1)\n\n\tdef forward(self, x):\n\t\tH, W = x.size(2), x.size(3)\n\t\t#-----------------------------------------#\n\t\t# 获得两个特征层\n\t\t# low_level_features: 浅层特征-进行卷积处理\n\t\t# x : 主干部分-利用ASPP结构进行加强特征提取\n\t\t#-----------------------------------------#\n\t\tlow_level_features, x = self.backbone(x)\n\n\n\t\tx = self.aspp(x)\n\t\tlow_level_features = self.shortcut_conv(low_level_features)\n\n\t\t#-----------------------------------------#\n\t\t# 将加强特征边上采样\n\t\t# 与浅层特征堆叠后利用卷积进行特征提取\n\t\t#-----------------------------------------#\n\t\tx = F.interpolate(x, size=(low_level_features.size(2), low_level_features.size(3)), mode='bilinear', align_corners=True)\n\t\tx = self.cat_conv(torch.cat((x, low_level_features), dim=1))\n\t\tx = self.cls_conv(x)\n\t\tx = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)\n\t\treturn x" }, { "identifier": "get_lr_scheduler", "path": "nets/deeplabv3_training.py", "snippet": "def get_lr_scheduler(lr_decay_type, lr, min_lr, total_iters, warmup_iters_ratio = 0.1, warmup_lr_ratio = 0.1, no_aug_iter_ratio = 0.3, step_num = 10):\n def yolox_warm_cos_lr(lr, min_lr, total_iters, warmup_total_iters, warmup_lr_start, no_aug_iter, iters):\n if iters <= warmup_total_iters:\n # lr = (lr - warmup_lr_start) * iters / float(warmup_total_iters) + warmup_lr_start\n lr = (lr - warmup_lr_start) * pow(iters / float(warmup_total_iters), 2) + warmup_lr_start\n elif iters >= total_iters - no_aug_iter:\n lr = min_lr\n else:\n lr = min_lr + 0.5 * (lr - min_lr) * (\n 1.0 + math.cos(math.pi* (iters - warmup_total_iters) / (total_iters - warmup_total_iters - no_aug_iter))\n )\n return lr\n\n def step_lr(lr, decay_rate, step_size, 
iters):\n if step_size < 1:\n raise ValueError(\"step_size must above 1.\")\n n = iters // step_size\n out_lr = lr * decay_rate ** n\n return out_lr\n\n if lr_decay_type == \"cos\":\n warmup_total_iters = min(max(warmup_iters_ratio * total_iters, 1), 3)\n warmup_lr_start = max(warmup_lr_ratio * lr, 1e-6)\n no_aug_iter = min(max(no_aug_iter_ratio * total_iters, 1), 15)\n func = partial(yolox_warm_cos_lr ,lr, min_lr, total_iters, warmup_total_iters, warmup_lr_start, no_aug_iter)\n else:\n decay_rate = (min_lr / lr) ** (1 / (step_num - 1))\n step_size = total_iters / step_num\n func = partial(step_lr, lr, decay_rate, step_size)\n\n return func" }, { "identifier": "set_optimizer_lr", "path": "nets/deeplabv3_training.py", "snippet": "def set_optimizer_lr(optimizer, lr_scheduler_func, epoch):\n lr = lr_scheduler_func(epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr" }, { "identifier": "weights_init", "path": "nets/deeplabv3_training.py", "snippet": "def weights_init(net, init_type='normal', init_gain=0.02):\n def init_func(m):\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and classname.find('Conv') != -1:\n if init_type == 'normal':\n torch.nn.init.normal_(m.weight.data, 0.0, init_gain)\n elif init_type == 'xavier':\n torch.nn.init.xavier_normal_(m.weight.data, gain=init_gain)\n elif init_type == 'kaiming':\n torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n torch.nn.init.orthogonal_(m.weight.data, gain=init_gain)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n elif classname.find('BatchNorm2d') != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n print('initialize network with %s type' % init_type)\n net.apply(init_func)" }, { "identifier": "LossHistory", "path": "utils/callbacks.py", "snippet": "class LossHistory():\n def __init__(self, log_dir, model, input_shape):\n self.log_dir = log_dir\n self.losses = []\n self.val_loss = []\n \n os.makedirs(self.log_dir)\n self.writer = SummaryWriter(self.log_dir)\n try:\n dummy_input = torch.randn(2, 3, input_shape[0], input_shape[1])\n self.writer.add_graph(model, dummy_input)\n except:\n pass\n\n def append_loss(self, epoch, loss, val_loss):\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n self.losses.append(loss)\n self.val_loss.append(val_loss)\n\n with open(os.path.join(self.log_dir, \"epoch_loss.txt\"), 'a') as f:\n f.write(str(loss))\n f.write(\"\\n\")\n with open(os.path.join(self.log_dir, \"epoch_val_loss.txt\"), 'a') as f:\n f.write(str(val_loss))\n f.write(\"\\n\")\n\n self.writer.add_scalar('loss', loss, epoch)\n self.writer.add_scalar('val_loss', val_loss, epoch)\n self.loss_plot()\n\n def loss_plot(self):\n iters = range(len(self.losses))\n\n plt.figure()\n plt.plot(iters, self.losses, 'red', linewidth = 2, label='train loss')\n plt.plot(iters, self.val_loss, 'coral', linewidth = 2, label='val loss')\n try:\n if len(self.losses) < 25:\n num = 5\n else:\n num = 15\n \n plt.plot(iters, scipy.signal.savgol_filter(self.losses, num, 3), 'green', linestyle = '--', linewidth = 2, label='smooth train loss')\n plt.plot(iters, scipy.signal.savgol_filter(self.val_loss, num, 3), '#8B4513', linestyle = '--', linewidth = 2, label='smooth val loss')\n except:\n pass\n\n plt.grid(True)\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.legend(loc=\"upper right\")\n\n plt.savefig(os.path.join(self.log_dir, \"epoch_loss.png\"))\n\n plt.cla()\n 
plt.close(\"all\")" }, { "identifier": "EvalCallback", "path": "utils/callbacks.py", "snippet": "class EvalCallback():\n def __init__(self, net, input_shape, num_classes, image_ids, dataset_path, log_dir, cuda, \\\n miou_out_path=\".temp_miou_out\", eval_flag=True, period=1):\n super(EvalCallback, self).__init__()\n \n self.net = net\n self.input_shape = input_shape\n self.num_classes = num_classes\n self.image_ids = image_ids\n self.dataset_path = dataset_path\n self.log_dir = log_dir\n self.cuda = cuda\n self.miou_out_path = miou_out_path\n self.eval_flag = eval_flag\n self.period = period\n \n self.image_ids = [image_id.split()[0] for image_id in image_ids]\n self.mious = [0]\n self.epoches = [0]\n if self.eval_flag:\n with open(os.path.join(self.log_dir, \"epoch_miou.txt\"), 'a') as f:\n f.write(str(0))\n f.write(\"\\n\")\n\n def get_miou_png(self, image):\n #---------------------------------------------------------#\n # 在这里将图像转换成RGB图像,防止灰度图在预测时报错。\n # 代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB\n #---------------------------------------------------------#\n image = cvtColor(image)\n orininal_h = np.array(image).shape[0]\n orininal_w = np.array(image).shape[1]\n #---------------------------------------------------------#\n # 给图像增加灰条,实现不失真的resize\n # 也可以直接resize进行识别\n #---------------------------------------------------------#\n image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]))\n #---------------------------------------------------------#\n # 添加上batch_size维度\n #---------------------------------------------------------#\n image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)\n\n with torch.no_grad():\n images = torch.from_numpy(image_data)\n if self.cuda:\n images = images.cuda()\n \n #---------------------------------------------------#\n # 图片传入网络进行预测\n #---------------------------------------------------#\n pr = self.net(images)[0]\n #---------------------------------------------------#\n # 取出每一个像素点的种类\n #---------------------------------------------------#\n pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy()\n #--------------------------------------#\n # 将灰条部分截取掉\n #--------------------------------------#\n pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \\\n int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]\n #---------------------------------------------------#\n # 进行图片的resize\n #---------------------------------------------------#\n pr = cv2.resize(pr, (orininal_w, orininal_h), interpolation = cv2.INTER_LINEAR)\n #---------------------------------------------------#\n # 取出每一个像素点的种类\n #---------------------------------------------------#\n pr = pr.argmax(axis=-1)\n \n image = Image.fromarray(np.uint8(pr))\n return image\n \n def on_epoch_end(self, epoch, model_eval):\n if epoch % self.period == 0 and self.eval_flag:\n self.net = model_eval\n gt_dir = os.path.join(self.dataset_path, \"VOC2007/SegmentationClass/\")\n pred_dir = os.path.join(self.miou_out_path, 'detection-results')\n if not os.path.exists(self.miou_out_path):\n os.makedirs(self.miou_out_path)\n if not os.path.exists(pred_dir):\n os.makedirs(pred_dir)\n print(\"Get miou.\")\n for image_id in tqdm(self.image_ids):\n #-------------------------------#\n # 从文件中读取图像\n #-------------------------------#\n image_path = os.path.join(self.dataset_path, \"VOC2007/JPEGImages/\"+image_id+\".jpg\")\n image = Image.open(image_path)\n #------------------------------#\n # 获得预测txt\n 
#------------------------------#\n image = self.get_miou_png(image)\n image.save(os.path.join(pred_dir, image_id + \".png\"))\n \n print(\"Calculate miou.\")\n _, IoUs, _, _ = compute_mIoU(gt_dir, pred_dir, self.image_ids, self.num_classes, None) # 执行计算mIoU的函数\n temp_miou = np.nanmean(IoUs) * 100\n\n self.mious.append(temp_miou)\n self.epoches.append(epoch)\n\n with open(os.path.join(self.log_dir, \"epoch_miou.txt\"), 'a') as f:\n f.write(str(temp_miou))\n f.write(\"\\n\")\n \n plt.figure()\n plt.plot(self.epoches, self.mious, 'red', linewidth = 2, label='train miou')\n\n plt.grid(True)\n plt.xlabel('Epoch')\n plt.ylabel('Miou')\n plt.title('A Miou Curve')\n plt.legend(loc=\"upper right\")\n\n plt.savefig(os.path.join(self.log_dir, \"epoch_miou.png\"))\n plt.cla()\n plt.close(\"all\")\n\n print(\"Get miou done.\")\n shutil.rmtree(self.miou_out_path)" }, { "identifier": "DeeplabDataset", "path": "utils/dataloader.py", "snippet": "class DeeplabDataset(Dataset):\n def __init__(self, annotation_lines, input_shape, num_classes, train, dataset_path):\n super(DeeplabDataset, self).__init__()\n self.annotation_lines = annotation_lines\n self.length = len(annotation_lines)\n self.input_shape = input_shape\n self.num_classes = num_classes\n self.train = train\n self.dataset_path = dataset_path\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, index):\n annotation_line = self.annotation_lines[index]\n name = annotation_line.split()[0]\n\n #-------------------------------#\n # 从文件中读取图像\n #-------------------------------#\n jpg = Image.open(os.path.join(os.path.join(self.dataset_path, \"VOC2007/JPEGImages\"), name + \".jpg\"))\n png = Image.open(os.path.join(os.path.join(self.dataset_path, \"VOC2007/SegmentationClass\"), name + \".png\"))\n #-------------------------------#\n # 数据增强\n #-------------------------------#\n jpg, png = self.get_random_data(jpg, png, self.input_shape, random = self.train)\n\n jpg = np.transpose(preprocess_input(np.array(jpg, np.float64)), [2,0,1])\n png = np.array(png)\n png[png >= self.num_classes] = self.num_classes\n #-------------------------------------------------------#\n # 转化成one_hot的形式\n # 在这里需要+1是因为voc数据集有些标签具有白边部分\n # 我们需要将白边部分进行忽略,+1的目的是方便忽略。\n #-------------------------------------------------------#\n seg_labels = np.eye(self.num_classes + 1)[png.reshape([-1])]\n seg_labels = seg_labels.reshape((int(self.input_shape[0]), int(self.input_shape[1]), self.num_classes + 1))\n\n return jpg, png, seg_labels\n\n def rand(self, a=0, b=1):\n return np.random.rand() * (b - a) + a\n\n def get_random_data(self, image, label, input_shape, jitter=.3, hue=.1, sat=0.7, val=0.3, random=True):\n image = cvtColor(image)\n label = Image.fromarray(np.array(label))\n #------------------------------#\n # 获得图像的高宽与目标高宽\n #------------------------------#\n iw, ih = image.size\n h, w = input_shape\n\n if not random:\n iw, ih = image.size\n scale = min(w/iw, h/ih)\n nw = int(iw*scale)\n nh = int(ih*scale)\n\n image = image.resize((nw,nh), Image.BICUBIC)\n new_image = Image.new('RGB', [w, h], (128,128,128))\n new_image.paste(image, ((w-nw)//2, (h-nh)//2))\n\n label = label.resize((nw,nh), Image.NEAREST)\n new_label = Image.new('L', [w, h], (0))\n new_label.paste(label, ((w-nw)//2, (h-nh)//2))\n return new_image, new_label\n\n #------------------------------------------#\n # 对图像进行缩放并且进行长和宽的扭曲\n #------------------------------------------#\n new_ar = iw/ih * self.rand(1-jitter,1+jitter) / self.rand(1-jitter,1+jitter)\n scale = self.rand(0.25, 2)\n if new_ar < 1:\n nh = 
int(scale*h)\n nw = int(nh*new_ar)\n else:\n nw = int(scale*w)\n nh = int(nw/new_ar)\n image = image.resize((nw,nh), Image.BICUBIC)\n label = label.resize((nw,nh), Image.NEAREST)\n \n #------------------------------------------#\n # 翻转图像\n #------------------------------------------#\n flip = self.rand()<.5\n if flip: \n image = image.transpose(Image.FLIP_LEFT_RIGHT)\n label = label.transpose(Image.FLIP_LEFT_RIGHT)\n \n #------------------------------------------#\n # 将图像多余的部分加上灰条\n #------------------------------------------#\n dx = int(self.rand(0, w-nw))\n dy = int(self.rand(0, h-nh))\n new_image = Image.new('RGB', (w,h), (128,128,128))\n new_label = Image.new('L', (w,h), (0))\n new_image.paste(image, (dx, dy))\n new_label.paste(label, (dx, dy))\n image = new_image\n label = new_label\n\n image_data = np.array(image, np.uint8)\n\n #------------------------------------------#\n # 高斯模糊\n #------------------------------------------#\n blur = self.rand() < 0.25\n if blur: \n image_data = cv2.GaussianBlur(image_data, (5, 5), 0)\n\n #------------------------------------------#\n # 旋转\n #------------------------------------------#\n rotate = self.rand() < 0.25\n if rotate: \n center = (w // 2, h // 2)\n rotation = np.random.randint(-10, 11)\n M = cv2.getRotationMatrix2D(center, -rotation, scale=1)\n image_data = cv2.warpAffine(image_data, M, (w, h), flags=cv2.INTER_CUBIC, borderValue=(128,128,128))\n label = cv2.warpAffine(np.array(label, np.uint8), M, (w, h), flags=cv2.INTER_NEAREST, borderValue=(0))\n\n #---------------------------------#\n # 对图像进行色域变换\n # 计算色域变换的参数\n #---------------------------------#\n r = np.random.uniform(-1, 1, 3) * [hue, sat, val] + 1\n #---------------------------------#\n # 将图像转到HSV上\n #---------------------------------#\n hue, sat, val = cv2.split(cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV))\n dtype = image_data.dtype\n #---------------------------------#\n # 应用变换\n #---------------------------------#\n x = np.arange(0, 256, dtype=r.dtype)\n lut_hue = ((x * r[0]) % 180).astype(dtype)\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n image_data = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\n image_data = cv2.cvtColor(image_data, cv2.COLOR_HSV2RGB)\n \n return image_data, label" }, { "identifier": "deeplab_dataset_collate", "path": "utils/dataloader.py", "snippet": "def deeplab_dataset_collate(batch):\n images = []\n pngs = []\n seg_labels = []\n for img, png, labels in batch:\n images.append(img)\n pngs.append(png)\n seg_labels.append(labels)\n images = torch.from_numpy(np.array(images)).type(torch.FloatTensor)\n pngs = torch.from_numpy(np.array(pngs)).long()\n seg_labels = torch.from_numpy(np.array(seg_labels)).type(torch.FloatTensor)\n return images, pngs, seg_labels" }, { "identifier": "download_weights", "path": "utils/utils.py", "snippet": "def download_weights(backbone, model_dir=\"./model_data\"):\n import os\n from torch.hub import load_state_dict_from_url\n \n download_urls = {\n 'mobilenet' : 'https://github.com/bubbliiiing/deeplabv3-plus-pytorch/releases/download/v1.0/mobilenet_v2.pth.tar',\n 'xception' : 'https://github.com/bubbliiiing/deeplabv3-plus-pytorch/releases/download/v1.0/xception_pytorch_imagenet.pth',\n }\n url = download_urls[backbone]\n \n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n load_state_dict_from_url(url, model_dir)" }, { "identifier": "show_config", "path": "utils/utils.py", "snippet": "def show_config(**kwargs):\n 
print('Configurations:')\n print('-' * 70)\n print('|%25s | %40s|' % ('keys', 'values'))\n print('-' * 70)\n for key, value in kwargs.items():\n print('|%25s | %40s|' % (str(key), str(value)))\n print('-' * 70)" }, { "identifier": "fit_one_epoch", "path": "utils/utils_fit.py", "snippet": "def fit_one_epoch(model_train, model, loss_history, eval_callback, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, cuda, dice_loss, focal_loss, cls_weights, num_classes, \\\n fp16, scaler, save_period, save_dir, local_rank=0):\n total_loss = 0\n total_f_score = 0\n\n val_loss = 0\n val_f_score = 0\n\n if local_rank == 0:\n print('Start Train')\n pbar = tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3)\n model_train.train()\n for iteration, batch in enumerate(gen):\n if iteration >= epoch_step: \n break\n imgs, pngs, labels = batch\n\n with torch.no_grad():\n weights = torch.from_numpy(cls_weights)\n if cuda:\n imgs = imgs.cuda(local_rank)\n pngs = pngs.cuda(local_rank)\n labels = labels.cuda(local_rank)\n weights = weights.cuda(local_rank)\n #----------------------#\n # 清零梯度\n #----------------------#\n optimizer.zero_grad()\n if not fp16:\n #----------------------#\n # 前向传播\n #----------------------#\n outputs = model_train(imgs)\n #----------------------#\n # 计算损失\n #----------------------#\n if focal_loss:\n loss = Focal_Loss(outputs, pngs, weights, num_classes = num_classes)\n else:\n loss = CE_Loss(outputs, pngs, weights, num_classes = num_classes)\n\n if dice_loss:\n main_dice = Dice_loss(outputs, labels)\n loss = loss + main_dice\n\n with torch.no_grad():\n #-------------------------------#\n # 计算f_score\n #-------------------------------#\n _f_score = f_score(outputs, labels)\n\n #----------------------#\n # 反向传播\n #----------------------#\n loss.backward()\n optimizer.step()\n else:\n from torch.cuda.amp import autocast\n with autocast():\n #----------------------#\n # 前向传播\n #----------------------#\n outputs = model_train(imgs)\n #----------------------#\n # 计算损失\n #----------------------#\n if focal_loss:\n loss = Focal_Loss(outputs, pngs, weights, num_classes = num_classes)\n else:\n loss = CE_Loss(outputs, pngs, weights, num_classes = num_classes)\n\n if dice_loss:\n main_dice = Dice_loss(outputs, labels)\n loss = loss + main_dice\n\n with torch.no_grad():\n #-------------------------------#\n # 计算f_score\n #-------------------------------#\n _f_score = f_score(outputs, labels)\n \n #----------------------#\n # 反向传播\n #----------------------#\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n\n total_loss += loss.item()\n total_f_score += _f_score.item()\n \n if local_rank == 0:\n pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1), \n 'f_score' : total_f_score / (iteration + 1),\n 'lr' : get_lr(optimizer)})\n pbar.update(1)\n\n if local_rank == 0:\n pbar.close()\n print('Finish Train')\n print('Start Validation')\n pbar = tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3)\n\n model_train.eval()\n for iteration, batch in enumerate(gen_val):\n if iteration >= epoch_step_val:\n break\n imgs, pngs, labels = batch\n with torch.no_grad():\n weights = torch.from_numpy(cls_weights)\n if cuda:\n imgs = imgs.cuda(local_rank)\n pngs = pngs.cuda(local_rank)\n labels = labels.cuda(local_rank)\n weights = weights.cuda(local_rank)\n\n #----------------------#\n # 前向传播\n #----------------------#\n outputs = model_train(imgs)\n #----------------------#\n # 计算损失\n 
#----------------------#\n if focal_loss:\n loss = Focal_Loss(outputs, pngs, weights, num_classes = num_classes)\n else:\n loss = CE_Loss(outputs, pngs, weights, num_classes = num_classes)\n\n if dice_loss:\n main_dice = Dice_loss(outputs, labels)\n loss = loss + main_dice\n #-------------------------------#\n # 计算f_score\n #-------------------------------#\n _f_score = f_score(outputs, labels)\n\n val_loss += loss.item()\n val_f_score += _f_score.item()\n \n if local_rank == 0:\n pbar.set_postfix(**{'val_loss' : val_loss / (iteration + 1),\n 'f_score' : val_f_score / (iteration + 1),\n 'lr' : get_lr(optimizer)})\n pbar.update(1)\n \n if local_rank == 0:\n pbar.close()\n print('Finish Validation')\n loss_history.append_loss(epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val)\n eval_callback.on_epoch_end(epoch + 1, model_train)\n print('Epoch:'+ str(epoch + 1) + '/' + str(Epoch))\n print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val))\n \n #-----------------------------------------------#\n # 保存权值\n #-----------------------------------------------#\n if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch:\n torch.save(model.state_dict(), os.path.join(save_dir, 'ep%03d-loss%.3f-val_loss%.3f.pth' % (epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val)))\n\n if len(loss_history.val_loss) <= 1 or (val_loss / epoch_step_val) <= min(loss_history.val_loss):\n print('Save best model to best_epoch_weights.pth')\n torch.save(model.state_dict(), os.path.join(save_dir, \"best_epoch_weights.pth\"))\n \n torch.save(model.state_dict(), os.path.join(save_dir, \"last_epoch_weights.pth\"))" } ]
import os
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim as optim
from torch.utils.data import DataLoader
from nets.deeplabv3_plus import DeepLab
from nets.deeplabv3_training import (get_lr_scheduler, set_optimizer_lr,
                                     weights_init)
from utils.callbacks import LossHistory, EvalCallback
from utils.dataloader import DeeplabDataset, deeplab_dataset_collate
from utils.utils import download_weights, show_config
from utils.utils_fit import fit_one_epoch
from torch.cuda.amp import GradScaler as GradScaler
12,464
#------------------------------------------------------------------# cls_weights = np.ones([num_classes], np.float32) #------------------------------------------------------------------# # num_workers 用于设置是否使用多线程读取数据,1代表关闭多线程 # 开启后会加快数据读取速度,但是会占用更多内存 # keras里开启多线程有些时候速度反而慢了许多 # 在IO为瓶颈的时候再开启多线程,即GPU运算速度远大于读取图片的速度。 #------------------------------------------------------------------# num_workers = 4 #------------------------------------------------------# # 设置用到的显卡 #------------------------------------------------------# ngpus_per_node = torch.cuda.device_count() if distributed: dist.init_process_group(backend="nccl") local_rank = int(os.environ["LOCAL_RANK"]) rank = int(os.environ["RANK"]) device = torch.device("cuda", local_rank) if local_rank == 0: print(f"[{os.getpid()}] (rank = {rank}, local_rank = {local_rank}) training...") print("Gpu Device Count : ", ngpus_per_node) else: device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') local_rank = 0 #----------------------------------------------------# # 下载预训练权重 #----------------------------------------------------# if pretrained: if distributed: if local_rank == 0: download_weights(backbone) dist.barrier() else: download_weights(backbone) model = DeepLab(num_classes=num_classes, backbone=backbone, downsample_factor=downsample_factor, pretrained=pretrained) if not pretrained: weights_init(model) if model_path != '': #------------------------------------------------------# # 权值文件请看README,百度网盘下载 #------------------------------------------------------# if local_rank == 0: print('Load weights {}.'.format(model_path)) #------------------------------------------------------# # 根据预训练权重的Key和模型的Key进行加载 #------------------------------------------------------# model_dict = model.state_dict() pretrained_dict = torch.load(model_path, map_location = device) load_key, no_load_key, temp_dict = [], [], {} for k, v in pretrained_dict.items(): if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v): temp_dict[k] = v load_key.append(k) else: no_load_key.append(k) model_dict.update(temp_dict) model.load_state_dict(model_dict) #------------------------------------------------------# # 显示没有匹配上的Key #------------------------------------------------------# if local_rank == 0: print("\nSuccessful Load Key:", str(load_key)[:500], "……\nSuccessful Load Key Num:", len(load_key)) print("\nFail To Load Key:", str(no_load_key)[:500], "……\nFail To Load Key num:", len(no_load_key)) print("\n\033[1;33;44m温馨提示,head部分没有载入是正常现象,Backbone部分没有载入是错误的。\033[0m") #----------------------# # 记录Loss #----------------------# if local_rank == 0: time_str = datetime.datetime.strftime(datetime.datetime.now(),'%Y_%m_%d_%H_%M_%S') log_dir = os.path.join(save_dir, "loss_" + str(time_str)) loss_history = LossHistory(log_dir, model, input_shape=input_shape) else: loss_history = None #------------------------------------------------------------------# # torch 1.2不支持amp,建议使用torch 1.7.1及以上正确使用fp16 # 因此torch1.2这里显示"could not be resolve" #------------------------------------------------------------------# if fp16: scaler = GradScaler() else: scaler = None model_train = model.train() #----------------------------# # 多卡同步Bn #----------------------------# if sync_bn and ngpus_per_node > 1 and distributed: model_train = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model_train) elif sync_bn: print("Sync_bn is not support in one gpu or not distributed.") if Cuda: if distributed: #----------------------------# # 多卡平行运行 #----------------------------# model_train = 
model_train.cuda(local_rank) model_train = torch.nn.parallel.DistributedDataParallel(model_train, device_ids=[local_rank], find_unused_parameters=True) else: model_train = torch.nn.DataParallel(model) cudnn.benchmark = True model_train = model_train.cuda() #---------------------------# # 读取数据集对应的txt #---------------------------# with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/train.txt"),"r") as f: train_lines = f.readlines() with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/val.txt"),"r") as f: val_lines = f.readlines() num_train = len(train_lines) num_val = len(val_lines) if local_rank == 0:
''' 训练自己的语义分割模型一定需要注意以下几点: 1、训练前仔细检查自己的格式是否满足要求,该库要求数据集格式为VOC格式,需要准备好的内容有输入图片和标签 输入图片为.jpg图片,无需固定大小,传入训练前会自动进行resize。 灰度图会自动转成RGB图片进行训练,无需自己修改。 输入图片如果后缀非jpg,需要自己批量转成jpg后再开始训练。 标签为png图片,无需固定大小,传入训练前会自动进行resize。 由于许多同学的数据集是网络上下载的,标签格式并不符合,需要再度处理。一定要注意!标签的每个像素点的值就是这个像素点所属的种类。 网上常见的数据集总共对输入图片分两类,背景的像素点值为0,目标的像素点值为255。这样的数据集可以正常运行但是预测是没有效果的! 需要改成,背景的像素点值为0,目标的像素点值为1。 如果格式有误,参考:https://github.com/bubbliiiing/segmentation-format-fix 2、损失值的大小用于判断是否收敛,比较重要的是有收敛的趋势,即验证集损失不断下降,如果验证集损失基本上不改变的话,模型基本上就收敛了。 损失值的具体大小并没有什么意义,大和小只在于损失的计算方式,并不是接近于0才好。如果想要让损失好看点,可以直接到对应的损失函数里面除上10000。 训练过程中的损失值会保存在logs文件夹下的loss_%Y_%m_%d_%H_%M_%S文件夹中 3、训练好的权值文件保存在logs文件夹中,每个训练世代(Epoch)包含若干训练步长(Step),每个训练步长(Step)进行一次梯度下降。 如果只是训练了几个Step是不会保存的,Epoch和Step的概念要捋清楚一下。 ''' if __name__ == "__main__": #---------------------------------# # Cuda 是否使用Cuda # 没有GPU可以设置成False #---------------------------------# Cuda = True #---------------------------------------------------------------------# # distributed 用于指定是否使用单机多卡分布式运行 # 终端指令仅支持Ubuntu。CUDA_VISIBLE_DEVICES用于在Ubuntu下指定显卡。 # Windows系统下默认使用DP模式调用所有显卡,不支持DDP。 # DP模式: # 设置 distributed = False # 在终端中输入 CUDA_VISIBLE_DEVICES=0,1 python train.py # DDP模式: # 设置 distributed = True # 在终端中输入 CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 train.py #---------------------------------------------------------------------# distributed = False #---------------------------------------------------------------------# # sync_bn 是否使用sync_bn,DDP模式多卡可用 #---------------------------------------------------------------------# sync_bn = False #---------------------------------------------------------------------# # fp16 是否使用混合精度训练 # 可减少约一半的显存、需要pytorch1.7.1以上 #---------------------------------------------------------------------# fp16 = False #-----------------------------------------------------# # num_classes 训练自己的数据集必须要修改的 # 自己需要的分类个数+1,如2+1 #-----------------------------------------------------# num_classes = 3 #---------------------------------# # 所使用的的主干网络: # mobilenet # xception #---------------------------------# backbone = "mobilenet" #----------------------------------------------------------------------------------------------------------------------------# # pretrained 是否使用主干网络的预训练权重,此处使用的是主干的权重,因此是在模型构建的时候进行加载的。 # 如果设置了model_path,则主干的权值无需加载,pretrained的值无意义。 # 如果不设置model_path,pretrained = True,此时仅加载主干开始训练。 # 如果不设置model_path,pretrained = False,Freeze_Train = Fasle,此时从0开始训练,且没有冻结主干的过程。 #----------------------------------------------------------------------------------------------------------------------------# pretrained = False #----------------------------------------------------------------------------------------------------------------------------# # 权值文件的下载请看README,可以通过网盘下载。模型的 预训练权重 对不同数据集是通用的,因为特征是通用的。 # 模型的 预训练权重 比较重要的部分是 主干特征提取网络的权值部分,用于进行特征提取。 # 预训练权重对于99%的情况都必须要用,不用的话主干部分的权值太过随机,特征提取效果不明显,网络训练的结果也不会好 # 训练自己的数据集时提示维度不匹配正常,预测的东西都不一样了自然维度不匹配 # # 如果训练过程中存在中断训练的操作,可以将model_path设置成logs文件夹下的权值文件,将已经训练了一部分的权值再次载入。 # 同时修改下方的 冻结阶段 或者 解冻阶段 的参数,来保证模型epoch的连续性。 # # 当model_path = ''的时候不加载整个模型的权值。 # # 此处使用的是整个模型的权重,因此是在train.py进行加载的,pretrain不影响此处的权值加载。 # 如果想要让模型从主干的预训练权值开始训练,则设置model_path = '',pretrain = True,此时仅加载主干。 # 如果想要让模型从0开始训练,则设置model_path = '',pretrain = Fasle,Freeze_Train = Fasle,此时从0开始训练,且没有冻结主干的过程。 # # 一般来讲,网络从0开始的训练效果会很差,因为权值太过随机,特征提取效果不明显,因此非常、非常、非常不建议大家从0开始训练! 
# 如果一定要从0开始,可以了解imagenet数据集,首先训练分类模型,获得网络的主干部分权值,分类模型的 主干部分 和该模型通用,基于此进行训练。 #----------------------------------------------------------------------------------------------------------------------------# model_path = "model_data/deeplab_mobilenetv2.pth" #---------------------------------------------------------# # downsample_factor 下采样的倍数8、16 # 8下采样的倍数较小、理论上效果更好。 # 但也要求更大的显存 #---------------------------------------------------------# downsample_factor = 8 #------------------------------# # 输入图片的大小 #------------------------------# input_shape = [512, 512] #----------------------------------------------------------------------------------------------------------------------------# # 训练分为两个阶段,分别是冻结阶段和解冻阶段。设置冻结阶段是为了满足机器性能不足的同学的训练需求。 # 冻结训练需要的显存较小,显卡非常差的情况下,可设置Freeze_Epoch等于UnFreeze_Epoch,此时仅仅进行冻结训练。 # # 在此提供若干参数设置建议,各位训练者根据自己的需求进行灵活调整: # (一)从整个模型的预训练权重开始训练: # Adam: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 100,Freeze_Train = True,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 100,Freeze_Train = False,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(不冻结) # SGD: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 100,Freeze_Train = True,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 100,Freeze_Train = False,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(不冻结) # 其中:UnFreeze_Epoch可以在100-300之间调整。 # (二)从主干网络的预训练权重开始训练: # Adam: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 100,Freeze_Train = True,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 100,Freeze_Train = False,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(不冻结) # SGD: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 120,Freeze_Train = True,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 120,Freeze_Train = False,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(不冻结) # 其中:由于从主干网络的预训练权重开始训练,主干的权值不一定适合语义分割,需要更多的训练跳出局部最优解。 # UnFreeze_Epoch可以在120-300之间调整。 # Adam相较于SGD收敛的快一些。因此UnFreeze_Epoch理论上可以小一点,但依然推荐更多的Epoch。 # (三)batch_size的设置: # 在显卡能够接受的范围内,以大为好。显存不足与数据集大小无关,提示显存不足(OOM或者CUDA out of memory)请调小batch_size。 # 受到BatchNorm层影响,batch_size最小为2,不能为1。 # 正常情况下Freeze_batch_size建议为Unfreeze_batch_size的1-2倍。不建议设置的差距过大,因为关系到学习率的自动调整。 #----------------------------------------------------------------------------------------------------------------------------# #------------------------------------------------------------------# # 冻结阶段训练参数 # 此时模型的主干被冻结了,特征提取网络不发生改变 # 占用的显存较小,仅对网络进行微调 # Init_Epoch 模型当前开始的训练世代,其值可以大于Freeze_Epoch,如设置: # Init_Epoch = 60、Freeze_Epoch = 50、UnFreeze_Epoch = 100 # 会跳过冻结阶段,直接从60代开始,并调整对应的学习率。 # (断点续练时使用) # Freeze_Epoch 模型冻结训练的Freeze_Epoch # (当Freeze_Train=False时失效) # Freeze_batch_size 模型冻结训练的batch_size # (当Freeze_Train=False时失效) #------------------------------------------------------------------# Init_Epoch = 0 Freeze_Epoch = 10 Freeze_batch_size = 8 #------------------------------------------------------------------# # 解冻阶段训练参数 # 此时模型的主干不被冻结了,特征提取网络会发生改变 # 占用的显存较大,网络所有的参数都会发生改变 # UnFreeze_Epoch 模型总共训练的epoch # Unfreeze_batch_size 模型在解冻后的batch_size #------------------------------------------------------------------# UnFreeze_Epoch = 20 Unfreeze_batch_size = 4 #------------------------------------------------------------------# # Freeze_Train 是否进行冻结训练 # 默认先冻结主干训练后解冻训练。 #------------------------------------------------------------------# Freeze_Train = True 
#------------------------------------------------------------------# # 其它训练参数:学习率、优化器、学习率下降有关 #------------------------------------------------------------------# #------------------------------------------------------------------# # Init_lr 模型的最大学习率 # 当使用Adam优化器时建议设置 Init_lr=5e-4 # 当使用SGD优化器时建议设置 Init_lr=7e-3 # Min_lr 模型的最小学习率,默认为最大学习率的0.01 #------------------------------------------------------------------# Init_lr = 7e-4 Min_lr = Init_lr * 0.01 #------------------------------------------------------------------# # optimizer_type 使用到的优化器种类,可选的有adam、sgd # 当使用Adam优化器时建议设置 Init_lr=5e-4 # 当使用SGD优化器时建议设置 Init_lr=7e-3 # momentum 优化器内部使用到的momentum参数 # weight_decay 权值衰减,可防止过拟合 # adam会导致weight_decay错误,使用adam时建议设置为0。 #------------------------------------------------------------------# optimizer_type = "sgd" momentum = 0.9 weight_decay = 1e-4 #1e-4 sgd是 #------------------------------------------------------------------# # lr_decay_type 使用到的学习率下降方式,可选的有'step'、'cos' #------------------------------------------------------------------# lr_decay_type = 'cos' #------------------------------------------------------------------# # save_period 多少个epoch保存一次权值 #------------------------------------------------------------------# save_period = 800 #------------------------------------------------------------------# # save_dir 权值与日志文件保存的文件夹 #------------------------------------------------------------------# save_dir = 'logs' #------------------------------------------------------------------# # eval_flag 是否在训练时进行评估,评估对象为验证集 # eval_period 代表多少个epoch评估一次,不建议频繁的评估 # 评估需要消耗较多的时间,频繁评估会导致训练非常慢 # 此处获得的mAP会与get_map.py获得的会有所不同,原因有二: # (一)此处获得的mAP为验证集的mAP。 # (二)此处设置评估参数较为保守,目的是加快评估速度。 #------------------------------------------------------------------# eval_flag = True eval_period = 400 #7.13开始跑 #10点40 #------------------------------------------------------------------# # VOCdevkit_path 数据集路径 #------------------------------------------------------------------# VOCdevkit_path = 'VOCdevkit' #------------------------------------------------------------------# # 建议选项: # 种类少(几类)时,设置为True # 种类多(十几类)时,如果batch_size比较大(10以上),那么设置为True # 种类多(十几类)时,如果batch_size比较小(10以下),那么设置为False #------------------------------------------------------------------# dice_loss = False #------------------------------------------------------------------# # 是否使用focal loss来防止正负样本不平衡 #------------------------------------------------------------------# focal_loss = False #------------------------------------------------------------------# # 是否给不同种类赋予不同的损失权值,默认是平衡的。 # 设置的话,注意设置成numpy形式的,长度和num_classes一样。 # 如: # num_classes = 3 # cls_weights = np.array([1, 2, 3], np.float32) #------------------------------------------------------------------# cls_weights = np.ones([num_classes], np.float32) #------------------------------------------------------------------# # num_workers 用于设置是否使用多线程读取数据,1代表关闭多线程 # 开启后会加快数据读取速度,但是会占用更多内存 # keras里开启多线程有些时候速度反而慢了许多 # 在IO为瓶颈的时候再开启多线程,即GPU运算速度远大于读取图片的速度。 #------------------------------------------------------------------# num_workers = 4 #------------------------------------------------------# # 设置用到的显卡 #------------------------------------------------------# ngpus_per_node = torch.cuda.device_count() if distributed: dist.init_process_group(backend="nccl") local_rank = int(os.environ["LOCAL_RANK"]) rank = int(os.environ["RANK"]) device = torch.device("cuda", local_rank) if local_rank == 0: print(f"[{os.getpid()}] (rank = {rank}, local_rank = {local_rank}) training...") print("Gpu Device Count : ", ngpus_per_node) else: device = 
torch.device('cuda' if torch.cuda.is_available() else 'cpu') local_rank = 0 #----------------------------------------------------# # 下载预训练权重 #----------------------------------------------------# if pretrained: if distributed: if local_rank == 0: download_weights(backbone) dist.barrier() else: download_weights(backbone) model = DeepLab(num_classes=num_classes, backbone=backbone, downsample_factor=downsample_factor, pretrained=pretrained) if not pretrained: weights_init(model) if model_path != '': #------------------------------------------------------# # 权值文件请看README,百度网盘下载 #------------------------------------------------------# if local_rank == 0: print('Load weights {}.'.format(model_path)) #------------------------------------------------------# # 根据预训练权重的Key和模型的Key进行加载 #------------------------------------------------------# model_dict = model.state_dict() pretrained_dict = torch.load(model_path, map_location = device) load_key, no_load_key, temp_dict = [], [], {} for k, v in pretrained_dict.items(): if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v): temp_dict[k] = v load_key.append(k) else: no_load_key.append(k) model_dict.update(temp_dict) model.load_state_dict(model_dict) #------------------------------------------------------# # 显示没有匹配上的Key #------------------------------------------------------# if local_rank == 0: print("\nSuccessful Load Key:", str(load_key)[:500], "……\nSuccessful Load Key Num:", len(load_key)) print("\nFail To Load Key:", str(no_load_key)[:500], "……\nFail To Load Key num:", len(no_load_key)) print("\n\033[1;33;44m温馨提示,head部分没有载入是正常现象,Backbone部分没有载入是错误的。\033[0m") #----------------------# # 记录Loss #----------------------# if local_rank == 0: time_str = datetime.datetime.strftime(datetime.datetime.now(),'%Y_%m_%d_%H_%M_%S') log_dir = os.path.join(save_dir, "loss_" + str(time_str)) loss_history = LossHistory(log_dir, model, input_shape=input_shape) else: loss_history = None #------------------------------------------------------------------# # torch 1.2不支持amp,建议使用torch 1.7.1及以上正确使用fp16 # 因此torch1.2这里显示"could not be resolve" #------------------------------------------------------------------# if fp16: scaler = GradScaler() else: scaler = None model_train = model.train() #----------------------------# # 多卡同步Bn #----------------------------# if sync_bn and ngpus_per_node > 1 and distributed: model_train = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model_train) elif sync_bn: print("Sync_bn is not support in one gpu or not distributed.") if Cuda: if distributed: #----------------------------# # 多卡平行运行 #----------------------------# model_train = model_train.cuda(local_rank) model_train = torch.nn.parallel.DistributedDataParallel(model_train, device_ids=[local_rank], find_unused_parameters=True) else: model_train = torch.nn.DataParallel(model) cudnn.benchmark = True model_train = model_train.cuda() #---------------------------# # 读取数据集对应的txt #---------------------------# with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/train.txt"),"r") as f: train_lines = f.readlines() with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/val.txt"),"r") as f: val_lines = f.readlines() num_train = len(train_lines) num_val = len(val_lines) if local_rank == 0:
show_config(
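The docstring at the top of this training script stresses that many downloaded VOC-style datasets mark the target class with pixel value 255, while this repository expects class-index masks (background = 0, target = 1). A minimal remapping sketch along those lines, assuming single-class PNG masks and hypothetical source/destination directories, could be:

```python
import os

import numpy as np
from PIL import Image


def remap_binary_masks(src_dir, dst_dir, threshold=127):
    """Convert 0/255 masks into 0/1 class-index masks (assumes a single target class)."""
    os.makedirs(dst_dir, exist_ok=True)
    for name in os.listdir(src_dir):
        if not name.endswith(".png"):
            continue
        mask = np.array(Image.open(os.path.join(src_dir, name)).convert("L"))
        mask = (mask > threshold).astype(np.uint8)   # background stays 0, 255 becomes 1
        Image.fromarray(mask, mode="L").save(os.path.join(dst_dir, name))


# Hypothetical paths; adapt to your own VOCdevkit layout.
remap_binary_masks("VOCdevkit/VOC2007/SegmentationClass_raw",
                   "VOCdevkit/VOC2007/SegmentationClass")
```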
9
2023-11-17 13:25:28+00:00
16k
fg320/DEASC
examples/12B_5x1_farm_dyn_tuning_wso_grouping.py
[ { "identifier": "WfModel", "path": "deasc/wf_model.py", "snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by pointing towards an input file.\n (FLORIS interface object).\n\n Args\n ----\n input file:(FLORIS .json input file).\n \"\"\"\n # Read and initialize input file\n self.input_file = input_file\n self.interface = floris_input_handler(self.input_file, path)\n\n # Assign wind farm model proporties\n self.D, self.H_hub, self.n_turbs = floris_properties(self)\n\n def set_aligned_layout(self, n_row, n_col, spac_x, spac_y, coordinates=False):\n \"\"\"\n Modify farm layout in aligned wind turbines with constant spacing,\n differing only from rows to columns. Flow field is also reinitialized.\n\n Args\n ----\n n_row: (float) number of turbine rows\n n_col: (float) number of turbine columns\n spac_x: (float) WT diam normalized turbines distance in x direction\n spac_y: (float) WT diam normalized turbines distance in y direction\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Input type check\n if not all(isinstance(i, int) for i in [n_row, n_col]) or \\\n not all(isinstance(j, (int, float)) for j in [spac_x, spac_y]):\n err_msg = \"Incorrect input value types\"\n raise ValueError(err_msg)\n\n # Calculate new coordinate farm layout\n layout_x = []\n layout_y = []\n for i in range(int(n_row)):\n for j in range(int(n_col)):\n layout_x.append(i * spac_x * self.D)\n layout_y.append(j * spac_y * self.D)\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def set_HR_layout(self, coordinates=False):\n \"\"\"\n Set Horns Rev wind farm layout to wind farm object and\n returns turbines' x and y coordinates if coordinates=True.\n\n Args\n ----\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Vestas V80 2 MW diameter check\n if self.D != 80:\n warning = \"Rotor diameter not from the Vestas V80 2 MW turbine\"\n warnings.warn(warning, UserWarning)\n\n n_rows = 10\n n_cols = 8\n spac_x = 7\n spac_y = 7\n angle = 6\n layout_x = []\n layout_y = []\n for i in range(int(n_rows)):\n for j in range(int(n_cols)):\n layout_x.append((i * spac_x * self.D) -\n (np.sin(np.radians(angle)) * j * spac_y * self.D))\n layout_y.append(j * spac_y * self.D * np.cos(np.radians(angle)))\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def farm_eval(self, yaw=None, ws=None, wd=None, ti=None, shear=None):\n \"\"\"\n Calculate farm flow field for given wind farm layout and input conditions.\n Return main outputs, such as yaw angles, turbines power, farm power, etc.\n\n Args\n ----\n yaw: (list, optional) turbines yaw angles (deg). Default to None.\n ws: (float, optional) input wind speeds (m/s). 
Default to None.\n wd: (float, optional) input wind directions (deg). Default to None.\n ti: (float, optional) input turbulence intensity. Default to None.\n shear: (float, optional) shear exponent. Default to None.\n\n Returns\n -------\n wf_pow: (float) WF power (MWatts).\n wt_pow: (np.array) WTs power (MWatts).\n wt_ti: (list) WTs turbulence intensity.\n wt_yaw: (np.array) WTs yaw angles (deg).\n \"\"\"\n # Main wind farm calculation\n wf_pow, wt_pow, wt_ti, wt_yaw, _ = floris_farm_eval(self,\n yaw,\n ws,\n wd,\n ti,\n shear)\n\n return (wf_pow, wt_pow, wt_ti, wt_yaw)\n\n def pow_yaw_sweep_1var(self, layout, var_info):\n \"\"\"\n Return wind farm power for a single yaw variable, either a\n single turbine or a single row of turbines. Sweep by row not possible\n for not aligned \"custom\" layouts.\n\n Args\n ----\n layout: (tuple)\n row: (integer) number of farm rows\n cols: (integer) number of farm columns\n or string \"custom\"\n var_info: (tuple)\n var_type: (string) \"T\" for turbine,\n \"R\" for row (not for custom layouts)\n var: (integer) turbine or row number\n var_value: (list of floats) variable values\n\n Returns\n -------\n obj_out: tuple\n obj: (list) objective values\n obj_func: (string) objective function\n var_info: (tuple) see input\n model: (string) model name\n \"\"\"\n # Extract inputs and check inputs\n var_type, var, var_value = var_info\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'R' and layout == \"custom\":\n err_msg = \"Row not allowed for custom layouts\"\n raise ValueError(err_msg)\n if var_type == 'R' and var > rows:\n err_msg = \"Row specified not in farm\"\n raise ValueError(err_msg)\n if var_type == 'T' and var > self.n_turbs:\n err_msg = \"Turbine specified not in farm\"\n raise ValueError(err_msg)\n\n # Calculations\n yaw_angles = np.array(floris_current_yaw(self))\n wf_pow = []\n\n for yaw_change in var_value:\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'T':\n yaw_angles[(var-1)] = yaw_change\n elif var_type == 'R':\n idx_1 = var*cols\n idx_0 = idx_1-cols\n yaw_angles[idx_0:idx_1] = yaw_change\n else:\n err_msg = \"var_type either 'T' or 'R'\"\n raise ValueError(err_msg)\n\n wf_pow_single, _, _, _ = self.farm_eval(yaw=yaw_angles)\n wf_pow.append(wf_pow_single)\n\n obj_out = (wf_pow, 'Farm Power')\n var_info = (var_type, var, var_value)\n print(\"Function exploration complete\")\n\n return obj_out, var_info" }, { "identifier": "WSOpt", "path": "deasc/wake_steering.py", "snippet": "class WSOpt:\n \"\"\"\n Class to perform wake steering optimization with a WfModel object, given an a-priori\n specified wind farm layout and specified atmopheric conditions. Optimization can have\n all/some turbines as variables, or rows for wind farms with equal columns. 
Optimizers\n available are the local SLSQP, where linear constraints can be added, and the global\n optimizer TuRBO.\n \"\"\"\n\n def __init__(self,\n wf_model,\n inflow,\n variables,\n var_bounds,\n var_initial,\n opt_method=\"SLSQP\",\n opt_options=None,\n obj_function=\"Farm Power\",\n constraints=(None, None, None),\n by_row=(False, None, None),\n tuning_dynamic=False\n ):\n \"\"\"\n Args\n ----\n wf_model: (WfModel)\n WfModel to perform wake steering optimization.\n inflow: (list) Inflow conditions for wake steering optimization.\n yaw_initial: (list) wind farm yaw angles (deg).\n (string) 'random' for random intial wind farm yaw angles.\n wd: (float) input wind directions (deg).\n ws: (float) input wind speeds (m/s).\n ti: (float) input turbulence intensity.\n shear: (float) shear exponent.\n variables: (list)\n List of turbines (or rows) to optimize. Naming convention starts from 1.\n var_bounds: (tuple)\n low_bound: (float) variable (yaw angle) lower bound.\n upp_bound: (float) variable (yaw angle) upper bound.\n var_initial:\n SLSQP: (list) list of initial variable values for each variable.\n (string) 'random' for random initial variable values.\n TURBO_1: (list of lists) list of n_init variable values lists\n (see TURBO_1 options).\n (string) 'LHS' latin hypercube sampling.\n TURBO_M: (string) 'LHS' latin hypercube sampling.\n opt_method: (string, optional) optimization method.\n 'SLSQP', 'TURBO_1 and 'TURBO_M' available.\n Default set to 'SLSQP'.\n opt_options: (dict , optional) optimization method options dictionary.\n Default set to None.\n opt_function: (string , optional) objective function. 'Farm Power' available\n Default set to 'Farm Power'.\n constraints: (tuple) Linear constraints definition. Limited to SLSQP.\n A: (matrix) linear constraint matrix.\n Default set to None.\n low_bound_constr: (float) lower non-normalized contraint bound.\n Default set to None.\n upp_bnd_constr: (float) upper non-normalized contraint bound.\n Default set to None.\n by_row : (tuple, optional) Optimization by row, requires all farm columns to have\n the same amount of rows.\n by_row_bool: (bool) True if optimization variables are wind farm rows,\n False if wind farm turbines. Default set to False.\n rows:: (int) wind farm rows. Default set to None.\n cols:: (int) wind farm columns. Default set to None.\n tuning_dynamic : (bool, optional)\n If True, include dynamic parameter tuning. See tuning_dynamic_initialize\n method. 
Default to False.\n \"\"\"\n # Opt Methods - Opt Options - Optimizers - Opt Functions\n self.opt_method_list = [\"SLSQP\", \"TURBO_1\", \"TURBO_M\"]\n self.opt_options_dict = {\"SLSQP\": {'maxiter': 100,\n 'disp': True,\n 'iprint': 2,\n 'ftol': 1e-6,\n 'eps': 0.01},\n \"TURBO_1\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"},\n \"TURBO_M\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"n_trust_regions\": 2,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"}}\n self.optimizer_dict = {'SLSQP': self._optimizer_scipy,\n 'TURBO_1': self._optimizer_turbo_1,\n 'TURBO_M': self._optimizer_turbo_m}\n self.obj_function_dict = {'Farm Power': self._obj_function_power}\n\n # Optimization methods and optimizer\n self.opt_method = opt_method\n self._opt_method_settler()\n self.optimizer = self.optimizer_dict[self.opt_method]\n\n # Optimizer options\n self.opt_options = opt_options\n self._opt_options_settler()\n\n # Optimization function\n self.obj_function_name = obj_function\n self._obj_function_settler()\n\n # Wind farm conditions\n self.wf_model = wf_model\n self.wf_model_dict_original = floris_extract_object_dict(self.wf_model)\n self.yaw_initial, self.wd, self.ws, self.ti, self.shear = inflow\n if not isinstance(self.yaw_initial, (list, np.ndarray)):\n if self.yaw_initial == 'random':\n self.yaw_initial = self._random_yaw_generator(self.wf_model.n_turbs,\n var_bounds)\n self._yaw_initial_input_handler()\n self.yaw_initial = np.array([float(item) for item in self.yaw_initial])\n\n # Optimization per wind turbine or per wind farm row\n self.by_row_bool = by_row[0]\n if self.by_row_bool:\n self.rows = by_row[1]\n self.cols = by_row[2]\n self._by_row_input_handler()\n\n # Variable bounds\n self.var_bounds = var_bounds\n self.low_bound, self.upp_bound = self.var_bounds\n self.low_bound_norm = norm(self.low_bound, self.low_bound, self.upp_bound)\n self.upp_bound_norm = norm(self.upp_bound, self.low_bound, self.upp_bound)\n self.var_bounds_norm = (self.low_bound_norm, self.upp_bound_norm)\n tmp = [self.var_bounds_norm for i in range(len(variables))]\n self.var_bounds_norm_list = tmp\n tmp = np.array([self.low_bound_norm for i in range(len(variables))])\n self.low_bound_norm_list = tmp\n tmp = np.array([self.upp_bound_norm for i in range(len(variables))])\n self.upp_bound_norm_list = tmp\n\n # Constraints\n self.A = constraints[0]\n self.low_bound_constr = constraints[1]\n self.upp_bound_constr = constraints[2]\n if self.A is not None:\n self._constraints_input_handler()\n self.low_bound_constr_norm = norm(self.low_bound_constr,\n self.low_bound,\n self.upp_bound)\n self.upp_bound_constr_norm = norm(self.upp_bound_constr,\n self.low_bound,\n self.upp_bound)\n\n # Yaw variables\n self.variables = variables\n self.var_initial = var_initial\n self._variables_input_handler()\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.opt_method == 'SLSQP' and self.var_initial == 'random':\n self.var_initial = self._random_yaw_generator(len(self.variables),\n self.var_bounds)\n self._var_initial_input_handler()\n self.var_initial_norm = self._var_initial_norm()\n\n # Dynamic tuning\n self.tuning_dyn_bool = tuning_dynamic\n 
self._tuning_dyn_bool_check()\n self.tuning_dyn_initialization = False\n\n self.opt_run = False\n\n def tuning_dyn_initialize(self, tuning_dyn_obj_list):\n \"\"\"\n Assign list of tuning dynamic objects TuningDyn to the WSOpt object.\n\n Args\n ----\n tuning_dyn_object: (list of TuningDyn objects)\n \"\"\"\n self.tuning_dyn_obj_list = tuning_dyn_obj_list\n self._tuning_dyn_init_input_handler()\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n tuning_dyn_obj.wso_compatibility_check(self)\n self.tuning_dyn_initialization = True\n\n def optimize_yaw(self):\n \"\"\"\n Optimize the yaw angle for the given WSOpt object.\n\n Returns\n -------\n opt_yaw_angles_vars: (ndarray) optimal yaw angles for the optimization variables.\n opt_yaw_angles_all: (ndarray) optimal yaw angles for all.wind farm turbines.\n \"\"\"\n # Tuning dynamic initialization check\n self._tuning_dyn_initialization_check()\n\n # Print optimization info\n self._print_info()\n\n # Wind farm power - no yaw\n self.wf_pow_noyaw = self._get_farm_power_noyaw()\n\n # Optimize\n self._iter_details_setup()\n self.opt_yaw_angles_vars, self.opt_yaw_angles_all = self.optimizer()\n self.opt_run = True\n\n return (self.opt_yaw_angles_vars, self.opt_yaw_angles_all)\n\n def get_optimization_details(self):\n \"\"\"\n Return optimization details: optimizer iterations details and objective function\n evaluations details. The two are identical for TURBO optimizers as an objective\n function evaluation corresponds to an optimizer iteration, different for SLSQP as\n additional objective function evaluations are required to approximate gradients.\n\n Returns\n -------\n iter_details: (tuple) optimizer iterations details.\n iter_yaw_angles: (list) list of yaw angles per optimizer iteration.\n iter_obj_func: (list) list of objective function per optimizer iteration.\n iter_farm_power: (list) list of farm power values per optimizer iteration.\n eval_details: (tuple) objective fucntion evaluations details.\n eval_yaw_angles: (list) list of yaw angles per evaluation.\n eval_obj_func: (list) list of objective function per evaluation.\n eval_farm_power: (list) list of farm power values per evaluation.\n \"\"\"\n iter_details = (self.iter_yaw_angles,\n self.iter_obj_func,\n self.iter_farm_power)\n eval_details = (self.eval_yaw_angles,\n self.eval_obj_func,\n self.eval_farm_power)\n return (iter_details, eval_details)\n\n # %% Private methods\n\n def _opt_method_settler(self):\n if self.opt_method not in self.opt_method_list:\n err_msg = \"Optimization method not recognized\"\n raise Exception(err_msg)\n\n def _opt_options_settler(self):\n if self.opt_options is None:\n self.opt_options = self.opt_options_dict[self.opt_method]\n\n def _obj_function_settler(self):\n if self.obj_function_name in list(self.obj_function_dict.keys()):\n self.obj_function = self.obj_function_dict[self.obj_function_name]\n else:\n err_msg = \"Optimization function not recognized\"\n raise Exception(err_msg)\n\n def _random_yaw_generator(self, yaw_number, yaw_bounds):\n yaw_angles = []\n for i in range(yaw_number):\n x = random.choice(range(yaw_bounds[0], yaw_bounds[1]+1))\n yaw_angles.append(x)\n return yaw_angles\n\n def _yaw_initial_input_handler(self):\n if len(self.yaw_initial) != self.wf_model.n_turbs:\n err_msg = \"Initial yaw angles do not match turbine number\"\n raise Exception(err_msg)\n\n def _by_row_input_handler(self):\n if self.rows*self.cols != self.wf_model.n_turbs:\n err_msg = \"Farm rows and columns provided do not match turbine number\"\n raise 
Exception(err_msg)\n\n def _constraints_input_handler(self):\n if self.opt_method != 'SLSQP':\n err_msg = \"Linear constraints (on top of bounds) limited to SLSQP optimizer\"\n raise Exception(err_msg)\n\n def _variables_input_handler(self):\n if self.by_row_bool:\n for row in self.variables:\n if row > self.rows:\n err_msg = \"Row/s specified not in farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.rows:\n err_msg = \"Too many rows specified\"\n raise Exception(err_msg)\n else:\n for turb in self.variables:\n if turb > self.wf_model.n_turbs:\n err_msg = \"Turbine/s specified not in the farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.wf_model.n_turbs:\n err_msg = \"Too many turbines specified\"\n raise Exception(err_msg)\n if 0 in self.variables:\n err_msg = \"Turbine/row counting convention starts from 1\"\n raise Exception(err_msg)\n\n def _var_initial_input_handler(self):\n if self.opt_method == 'TURBO_1':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n pass\n elif self.var_initial == 'random':\n err_msg = \"Random initial variables limited to SLSQP optimizer\"\n raise Exception(err_msg)\n else:\n if len(self.var_initial) != self.opt_options[\"n_init\"]:\n err_msg = \"n_init initial variable lists are needed (see TURBO options)\"\n raise Exception(err_msg)\n elif len(self.var_initial[0]) != len(self.variables):\n err_msg = \"var_initial sublists length not equal number of variables\"\n raise Exception(err_msg)\n elif self.opt_method == 'TURBO_M':\n if self.var_initial != 'LHS':\n err_msg = \"TURBO_M optimizer requires LHS as initial sampling\"\n elif self.opt_method == 'SLSQP':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n err_msg = \"Latin Hypercube Sampling limited to TURBO optimizers\"\n raise Exception(err_msg)\n elif len(self.variables) != len(self.var_initial):\n err_msg = \"var_initial length needs to equal number of variables\"\n raise Exception(err_msg)\n\n def _var_initial_norm(self):\n if self.opt_method == \"SLSQP\":\n self.var_initial = np.array([float(item) for item in self.var_initial])\n var_initial_norm = norm(self.var_initial, self.low_bound, self.upp_bound)\n elif self.var_initial == 'LHS':\n var_initial_norm = None\n else:\n self.var_initial = np.array([np.array(x) for x in self.var_initial])\n var_initial_norm = []\n for x_list in self.var_initial:\n x_list_norm = []\n for x in x_list:\n x_norm = norm(x, self.low_bound, self.upp_bound)\n x_list_norm.append(x_norm)\n var_initial_norm.append(np.array(x_list_norm))\n return np.array(var_initial_norm)\n\n def _get_farm_power_noyaw(self):\n if (self.tuning_dyn_initialization and\n hasattr(self.tuning_dyn_obj_list[0], 'wf_pow_noyaw')):\n wf_pow_noyaw = self.tuning_dyn_obj_list[0].wf_pow_noyaw\n else:\n self.yaw_zero = np.full(shape=self.wf_model.n_turbs, fill_value=0.0)\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n # Tune parameters\n if self.tuning_dyn_initialization:\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, self.yaw_zero)\n\n wf_pow_noyaw = floris_calculate_farm_power(self.wf_model, self.yaw_zero)\n return wf_pow_noyaw\n\n def _print_info(self):\n print(\"=====================================================\")\n print(\"Optimizing wake redirection control...\")\n print(\"Optimization method: %s\" % (self.opt_method))\n print(\"Optimization function: %s \\n\" % 
(self.obj_function_name))\n if self.by_row_bool:\n print(\"Rows being optimized: \")\n print(self.variables)\n else:\n print(\"Turbines being optimized: \")\n print(self.variables)\n print(\"Number of variables to optimize = \", len(self.variables))\n print(\"=====================================================\")\n\n def _iter_details_setup(self):\n # Details for each obj function evaluation\n self.eval_yaw_angles = [] # deg\n self.eval_obj_func = []\n self.eval_farm_power = [] # MW\n\n # Details for each optimizer iteration\n self.iter_yaw_angles = [] # deg\n self.iter_obj_func = []\n self.iter_farm_power = [] # MW\n\n def _variables_to_farm_yaw(self, yaw_initial, var_values):\n yaw_angles = copy.deepcopy(yaw_initial)\n if self.by_row_bool:\n for i, row_idx in enumerate(self.variables):\n idx_1 = row_idx*self.cols\n idx_0 = idx_1-self.cols\n yaw_angles[idx_0:idx_1] = var_values[i]\n else:\n for i, turb_idx in enumerate(self.variables):\n yaw_angles[turb_idx-1] = var_values[i]\n return yaw_angles.tolist()\n\n # %% Optimizers\n\n def _optimizer_scipy(self):\n # Call back function for iter details\n def callback_func(xk):\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n # Linearly constrained case\n if self.A is not None:\n self.C = LinearConstraint(self.A,\n self.low_bound_constr_norm,\n self.upp_bound_constr_norm)\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n constraints=(self.C,),\n options=self.opt_options)\n # Unconstrained case\n else:\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n options=self.opt_options)\n # Extract optimal yaw angles for variables\n opt_yaw_angles_vars = unnorm(self.residual_plant.x,\n self.low_bound,\n self.upp_bound)\n # Extract optimal yaw angles for the entire farm\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Use best index because if total iterations reached, optimum not last evaluation\n eval_yaw_angles_lists = [x.tolist() for x in self.eval_yaw_angles]\n index_best = eval_yaw_angles_lists.index(opt_yaw_angles_all)\n opt_yaw_angles_all = np.array(opt_yaw_angles_all)\n self.obj_func_opt = self.eval_obj_func[index_best]\n self.farm_power_opt = self.eval_farm_power[index_best]\n\n # Add initial and last points to iteration details\n self.iter_yaw_angles.insert(0, self.eval_yaw_angles[0])\n self.iter_obj_func.insert(0, self.eval_obj_func[0])\n self.iter_farm_power.insert(0, self.eval_farm_power[0])\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_1(self):\n\n # TURBO initial sampling\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n X_init_provided = False\n X_init_same_norm = None\n else:\n X_init_provided = True\n X_init_same_norm = self.var_initial_norm\n\n # TURBO 
optimization\n turbo_1 = Turbo1(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n X_init_provided=X_init_provided,\n X_init_same=X_init_same_norm,\n )\n turbo_1.optimize()\n X = turbo_1.X # Evaluated points\n fX = turbo_1.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.obj_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_m(self):\n\n # TURBO optimization\n turbo_m = TurboM(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n )\n turbo_m.optimize()\n X = turbo_m.X # Evaluated points\n fX = turbo_m.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.cost_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n # %% Objective functions\n\n def _obj_function_power(self, var_norm):\n\n # Extract farm yaw angles\n var_unnorm = unnorm(var_norm, self.low_bound, self.upp_bound)\n yaw_angles = self._variables_to_farm_yaw(self.yaw_initial, var_unnorm)\n yaw_angles = np.array([float(item) for item in yaw_angles])\n\n # Tune parameters dynamically\n if self.tuning_dyn_initialization:\n # Set equal yaw angles in groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n yaw_angles = self.tuning_dyn_obj_list[0].set_yaw_groups(yaw_angles)\n # Tune parameters\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, yaw_angles)\n\n # Calculate negative of the farm power normalized by power for zero yaw\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n wf_pow = floris_calculate_farm_power(self.wf_model, yaw_angles)\n obj_function = (-1 * wf_pow / self.wf_pow_noyaw)\n\n # Update evalauation details\n 
self.eval_yaw_angles.append(yaw_angles)\n self.eval_obj_func.append(obj_function)\n self.eval_farm_power.append(wf_pow)\n\n return obj_function\n\n # %% Tuning Dynamic methods\n\n def _tuning_dyn_bool_check(self):\n if self.tuning_dyn_bool and self.by_row_bool:\n err_msg = \"Dynamic tuning not available for optimization by row.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_init_input_handler(self):\n if isinstance(self.tuning_dyn_obj_list, (list, np.ndarray)) is False:\n err_msg = \"TuningDyn objects need to be in a list, even if only one.\"\n raise Exception(err_msg)\n # Check dynamic grouping tuning objects have the same tuning groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n tuning_groups_first = self.tuning_dyn_obj_list[0].tuning_groups\n same_groups = all(obj.tuning_groups == tuning_groups_first\n for obj in self.tuning_dyn_obj_list)\n if same_groups is False:\n err_msg = \"TuningDyn objects have different groupings.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_initialization_check(self):\n if self.tuning_dyn_bool and self.tuning_dyn_initialization is False:\n err_msg = \"Tuning dynamic not initialized. See tuning_dyn_initialize method.\"\n raise Exception(err_msg)" }, { "identifier": "GPWrap", "path": "deasc/gp.py", "snippet": "class GPWrap:\n \"\"\"\n Wrapper class to create, modify and visualise Gaussian Processes for dynamic parameter\n tuning. Currently limited to a single output GP.\n \"\"\"\n\n def __init__(self, parameter_class, parameter_name, dimensions):\n self.parameter_class = parameter_class\n self.parameter_name = parameter_name\n self.dimensions = dimensions\n \"\"\"\n Args\n ----\n parameter_class: string\n Parameter class of the optimal parameter to fit.\n parameter_name: string\n Name of the optimal parameter to fit.\n dimensions: integer\n Dimensions/inputs/variables of the GP.\n \"\"\"\n\n def GP_so(self, yaw_data, param_data, num_restarts=50, noise=0.05):\n \"\"\"\n Construct and returns a single-output (SO) GP for the given input dataset\n (optimal parameter for a given yaw configuration).\n\n Args\n ----\n yaw_data: list of lists\n list of input yaw configurations for which parameter has been tuned\n param_data: list of lists\n for each yaw configuration in yaw_data, list containing the optimal parameter\n num_restarts: int\n number of random starts of the GP hyperparameter tuning optimization\n noise: float\n noise in output prediction. Default is 0.05\n\n Returns\n -------\n m: GPy single-output Gaussian Process model\n \"\"\"\n # Sample check on argument dimension\n if len(yaw_data[0]) != self.dimensions:\n err_msg = (\"Yaw input and GP dimensions do not match\")\n raise Exception(err_msg)\n if len(param_data[0]) != 1:\n err_msg = (\"Single-output GPs only\")\n raise Exception(err_msg)\n\n # Data structure arguments\n yaw_data_GP = np.array(yaw_data)\n param_data_GP = np.array(param_data)\n\n # GP model\n kernel = GPy.kern.RBF(input_dim=self.dimensions, variance=1., lengthscale=1.)\n self.m = GPy.models.GPRegression(yaw_data_GP,\n param_data_GP,\n kernel,\n noise_var=noise)\n\n # Hyperparameter tuning\n self.m.optimize(optimizer=None, # Default lbfgsb\n start=None,\n messages=False,\n max_iters=1000)\n self.m.optimize_restarts(num_restarts=num_restarts)\n return self.m\n\n def GP_so_plot(self, parameter_range_plot, yaw_range_plot):\n \"\"\"\n Plot a single-output (SO) GP model. 
1D and 2D plots are generated for each\n variable combination.\n\n Args\n ----\n parameter_range: tuple\n range of the optimal parameter to plot\n parameter_range: tuple\n range of the yaw variables to plot\n \"\"\"\n # Plotting library choice and defaults values\n GPy.plotting.change_plotting_library('matplotlib')\n GPy.plotting.matplot_dep.defaults.data_2d = {'s': 0,\n 'edgecolors': 'none',\n 'linewidth': 0.0,\n 'cmap': cm.get_cmap('hot'),\n 'alpha': 0.5}\n\n # 1D Plots\n if self.dimensions == 1:\n figure = GPy.plotting.plotting_library().figure(1, 1, figsize=(5, 2.5))\n title = 'GP %s' % (self.parameter_name)\n xlabel = '$\\gamma_{1}$ [deg]'\n ylabel = '$%s_{opt}$' % (self.parameter_name)\n fig = self.m.plot(figure=figure,\n col=1,\n row=1,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n ylim=list(parameter_range_plot),\n legend=False,\n plot_data=True)\n else:\n n_cuts = 3\n slices = np.linspace(yaw_range_plot[0], yaw_range_plot[1], n_cuts)\n figsize = (5*n_cuts, 2.5*self.dimensions)\n figure = GPy.plotting.plotting_library().figure(self.dimensions,\n n_cuts,\n figsize=figsize)\n\n for dim_idx in range(self.dimensions):\n for i, slice_single in zip(range(n_cuts), slices):\n title = \"GP %s - $\\gamma_{others}$\" \\\n \"%.1f $^{\\circ}$\" % (self.parameter_name, slice_single)\n xlabel = '$\\gamma_{%i}$ [deg]' % (dim_idx+1)\n ylabel = '$%s_{opt}$' % (self.parameter_name)\n inputs = []\n for j in range(self.dimensions):\n if j == dim_idx:\n pass\n else:\n inputs.append((j, slice_single))\n fig = self.m.plot(figure=figure,\n col=(i+1),\n row=(dim_idx+1),\n fixed_inputs=inputs,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n ylim=list(parameter_range_plot),\n legend=False,\n plot_data=False)\n\n # 2D Plots\n # Countours are fine ##\n # Data points (training) plotted are off ##\n # double checked with GP and training database ##\n if self.dimensions == 1:\n pass\n elif self.dimensions == 2:\n figure = GPy.plotting.plotting_library().figure(1, 1, figsize=(3, 2.5))\n\n title = 'GP %s' % (self.parameter_name)\n xlabel = '$\\gamma_{1}$ [deg]'\n ylabel = '$\\gamma_{2}$ [deg]'\n\n fig = self.m.plot(figure=figure,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n legend=False,\n plot_data=True)\n\n ax = plt.gca()\n mappable = ax.collections[0]\n cbar = plt.colorbar(mappable)\n # cbar.set_label('$%s_{opt}$'%(self.parameter_name))\n else:\n n_cuts = 3\n slices = np.linspace(yaw_range_plot[0], yaw_range_plot[1], n_cuts)\n plot_rows = self.dimensions-1\n plot_cols = self.dimensions-1\n combinations = list(itertools.combinations(\n list(range(0, self.dimensions)), 2))\n\n figsize = (3*plot_cols*len(slices), 2.5*plot_rows)\n figure = GPy.plotting.plotting_library().figure(plot_rows,\n plot_cols*len(slices),\n figsize=figsize)\n for i, slice_single in zip(range(n_cuts), slices):\n for comb_idx, comb in enumerate(combinations):\n title = 'GP %s - $\\gamma_{others}$' \\\n '%.1f $^{\\circ}$' % (self.parameter_name, slice_single)\n xlabel = '$\\gamma_{%i}$ [deg]' % (comb[0]+1)\n ylabel = '$\\gamma_{%i}$ [deg]' % (comb[1]+1)\n inputs = []\n for j in range(self.dimensions):\n if j in comb:\n pass\n else:\n inputs.append((j, slice_single))\n\n fig = self.m.plot(figure=figure,\n col=(comb[0]+1+plot_cols*i),\n row=(comb[1]),\n fixed_inputs=inputs,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n legend=False,\n plot_data=True)\n\n ax = plt.gca()\n mappable = ax.collections[0]\n cbar = plt.colorbar(mappable)\n # cbar.set_label('$%s_{opt}$'%(self.parameter_name))" }, { "identifier": "TuningDyn_Grouping", 
"path": "deasc/tuning_dynamic.py", "snippet": "class TuningDyn_Grouping(TuningDyn, TuningDyn_SharedMethods):\n \"\"\"Class for dynamic parameter tuning with grouping of turbines within a wind farm.\"\"\"\n\n def __init__(self, param_class, param_name, tuning_groups, GP_model):\n \"\"\"\n Args\n ----\n param_class: (string) tuning parameter class.\n param_name: (string) tuning parameter name.\n tuning_groups: (list of lists) list of turbine groups included in the tuning. In\n each list, specify the turbines in the group.\n GP_model: (GPy object) GP model with len(tuning_groups) input dimensions.\n \"\"\"\n super().__init__(param_class, param_name)\n # Tuning info\n self.tuning_variables = tuning_groups\n self.tuning_dimensions = len(self.tuning_variables)\n self.GP_model = GP_model\n # GP dimension check\n self._GP_dimension_check(self.tuning_dimensions, self.GP_model)\n # Grouping info\n self.tuning_groups = tuning_groups\n self.grouping_bool = True\n\n @property\n def tuning_turbines(self):\n \"\"\"List of the tuning turbines in the wind farm.\"\"\"\n return [x for sublist in self.tuning_variables for x in sublist]\n\n def wso_compatibility_check(self, wso_obj):\n \"\"\"\n Check compatibility with a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object to which dynamic parameter tuning is added.\n \"\"\"\n self._tuning_turbines_check(wso_obj, self.tuning_turbines)\n self._tuning_groups_check(wso_obj)\n\n def tune_parameter(self, wso_obj, yaw_angles):\n \"\"\"\n Perform parameter tuning in a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object.\n yaw_angles: (np.ndarray) yaw angles of all turbines in the wind farm.\n\n Returns\n -------\n wf-model_tuned: (WfModel) tuned WfModel to use in the current iteration of the\n wake steering optimisation.\n \"\"\"\n # Extract WSOpt WfModel dictionary\n wf_model_dict = floris_extract_object_dict(wso_obj.wf_model)\n\n # Create and apply tuned WfModel dictionary\n GP_input = self._get_GP_input_groups(self.tuning_groups, yaw_angles)\n mu, var, = self.GP_model.predict_noiseless(np.array([GP_input]))\n optimal_parameter = mu[0][0]\n wf_model_dict_tuned = floris_param_change_object_dict(wf_model_dict,\n self.param_class,\n self.param_name,\n optimal_parameter)\n wf_model_tuned = floris_param_change_object(wso_obj.wf_model,\n wf_model_dict_tuned)\n return wf_model_tuned\n\n def set_yaw_groups(self, yaw_angles):\n \"\"\"\n Force yaw angles of turbines in tuning groups to be equal in the wake\n steering optimisation.\n\n Args\n ----\n yaw_angles: (np.ndarray) yaw angles of all turbines in the wind farm.\n\n Returns\n -------\n yaw_angles_grouped: (np.ndarray) yaw angles of all turbines in the wind farm with\n equal yaw angles in each turbine group.\n \"\"\"\n return self._set_yaw_groups(yaw_angles)" }, { "identifier": "floris_extract_object_dict", "path": "deasc/utils_floris.py", "snippet": "def floris_extract_object_dict(wf_model):\n \"\"\"Extract and return the current FLORIS object dictionary.\"\"\"\n return wf_model.interface.floris.as_dict()" }, { "identifier": "floris_param_change_object_dict", "path": "deasc/utils_floris.py", "snippet": "def floris_param_change_object_dict(wf_model_dict, param_class, param_name, param_value):\n \"\"\"\n Change FLORIS object with a new model parameter, return new FLORIS object dictionary.\n FLORIS object is not reinitialised (see function floris_parameter_change_object).\n \"\"\"\n wf_model_dict_new = copy.deepcopy(wf_model_dict)\n models_dict = floris_extract_models_dict(wf_model_dict_new)\n 
(wf_model_dict_new['wake'][param_class]\n [models_dict[param_class]][param_name]) = param_value\n return wf_model_dict_new" }, { "identifier": "floris_param_change_object", "path": "deasc/utils_floris.py", "snippet": "def floris_param_change_object(wf_model, wf_model_dict_new):\n \"\"\"Change FLORIS object with new object dictionary. Also reinitialise farm layout.\"\"\"\n x_reinit, y_reinit = wf_model.interface.get_turbine_layout()\n wf_model.interface = FI(wf_model_dict_new)\n wf_model.interface.reinitialize(layout_x=x_reinit, layout_y=y_reinit)\n return wf_model" } ]
import numpy as np

from deasc import WfModel
from deasc import WSOpt
from deasc import GPWrap
from deasc import TuningDyn_Grouping

from deasc.utils_floris import (
    floris_extract_object_dict,
    floris_param_change_object_dict,
    floris_param_change_object
)
11,439
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with grouping is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles two most upstream groups, each of two turbines. """ # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3)
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with grouping is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles two most upstream groups, each of two turbines. """ # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3)
wf_model = floris_param_change_object(wf_model, wf_model_dict)
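The context snippets for this record document the GPWrap, TuningDyn_Grouping and WSOpt interfaces. As a rough illustration of how the snippet above might continue, a sketch built on those documented signatures is shown below; the inflow conditions, yaw/parameter training data, bounds, and the 'wake_velocity_parameters'/'we' parameter naming are assumptions for illustration, not values taken from the repository, and the code reuses the wf_model and imports from the snippet above.

```python
# Illustrative continuation only -- all numeric values below are made up.
inflow = ([0.0] * 5, 270.0, 8.0, 0.05, 0.12)   # yaw_initial, wd, ws, ti, shear (assumed)
variables = [1, 2, 3, 4]                       # turbines in the two upstream groups
var_bounds = (-25, 25)

# Single-output GP of the (assumed) optimal Jensen expansion parameter over the group yaws.
yaw_data = [[0, 0], [10, 10], [20, 20]]        # assumed training yaw configurations
k_data = [[0.04], [0.05], [0.06]]              # assumed tuned parameter values
GP_model = GPWrap('wake_velocity_parameters', 'we', dimensions=2).GP_so(
    yaw_data, k_data, num_restarts=10)

# Dynamic tuning with two groups of two turbines, attached to the wake steering optimiser.
tuning_dyn_obj = TuningDyn_Grouping('wake_velocity_parameters', 'we',
                                    tuning_groups=[[1, 2], [3, 4]],
                                    GP_model=GP_model)
wso_obj = WSOpt(wf_model, inflow, variables, var_bounds,
                var_initial=[0, 0, 0, 0], opt_method="SLSQP",
                tuning_dynamic=True)
wso_obj.tuning_dyn_initialize([tuning_dyn_obj])
opt_yaw_vars, opt_yaw_all = wso_obj.optimize_yaw()
```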
6
2023-11-10 18:13:27+00:00
16k
CPES-Power-and-Energy-Systems/interoperable-recommender-tso
energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/bayesian_optimization.py
[ { "identifier": "GaussianProcess", "path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py", "snippet": "class GaussianProcess(BaseEstimator, RegressorMixin):\n \"\"\"The legacy Gaussian Process model class.\n\n .. deprecated:: 0.18\n This class will be removed in 0.20.\n Use the :class:`GaussianProcessRegressor` instead.\n\n Read more in the :ref:`User Guide <gaussian_process>`.\n\n Parameters\n ----------\n regr : string or callable, optional\n A regression function returning an array of outputs of the linear\n regression functional basis. The number of observations n_samples\n should be greater than the size p of this basis.\n Default assumes a simple constant regression trend.\n Available built-in regression models are::\n\n 'constant', 'linear', 'quadratic'\n\n corr : string or callable, optional\n A stationary autocorrelation function returning the autocorrelation\n between two points x and x'.\n Default assumes a squared-exponential autocorrelation model.\n Built-in correlation models are::\n\n 'absolute_exponential', 'squared_exponential',\n 'generalized_exponential', 'cubic', 'linear'\n\n beta0 : double array_like, optional\n The regression weight vector to perform Ordinary Kriging (OK).\n Default assumes Universal Kriging (UK) so that the vector beta of\n regression weights is estimated using the maximum likelihood\n principle.\n\n storage_mode : string, optional\n A string specifying whether the Cholesky decomposition of the\n correlation matrix should be stored in the class (storage_mode =\n 'full') or not (storage_mode = 'light').\n Default assumes storage_mode = 'full', so that the\n Cholesky decomposition of the correlation matrix is stored.\n This might be a useful parameter when one is not interested in the\n MSE and only plan to estimate the BLUP, for which the correlation\n matrix is not required.\n\n verbose : boolean, optional\n A boolean specifying the verbose level.\n Default is verbose = False.\n\n theta0 : double array_like, optional\n An array with shape (n_features, ) or (1, ).\n The parameters in the autocorrelation model.\n If thetaL and thetaU are also specified, theta0 is considered as\n the starting point for the maximum likelihood estimation of the\n best set of parameters.\n Default assumes isotropic autocorrelation model with theta0 = 1e-1.\n\n thetaL : double array_like, optional\n An array with shape matching theta0's.\n Lower bound on the autocorrelation parameters for maximum\n likelihood estimation.\n Default is None, so that it skips maximum likelihood estimation and\n it uses theta0.\n\n thetaU : double array_like, optional\n An array with shape matching theta0's.\n Upper bound on the autocorrelation parameters for maximum\n likelihood estimation.\n Default is None, so that it skips maximum likelihood estimation and\n it uses theta0.\n\n normalize : boolean, optional\n Input X and observations y are centered and reduced wrt\n means and standard deviations estimated from the n_samples\n observations provided.\n Default is normalize = True so that data is normalized to ease\n maximum likelihood estimation.\n\n nugget : double or ndarray, optional\n Introduce a nugget effect to allow smooth predictions from noisy\n data. If nugget is an ndarray, it must be the same length as the\n number of data points used for the fit.\n The nugget is added to the diagonal of the assumed training covariance;\n in this way it acts as a Tikhonov regularization in the problem. 
In\n the special case of the squared exponential correlation function, the\n nugget mathematically represents the variance of the input values.\n Default assumes a nugget close to machine precision for the sake of\n robustness (nugget = 10. * MACHINE_EPSILON).\n\n optimizer : string, optional\n A string specifying the optimization algorithm to be used.\n Default uses 'fmin_cobyla' algorithm from scipy.optimize.\n Available optimizers are::\n\n 'fmin_cobyla', 'Welch'\n\n 'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_.\n It consists in iterating over several one-dimensional optimizations\n instead of running one single multi-dimensional optimization.\n\n random_start : int, optional\n The number of times the Maximum Likelihood Estimation should be\n performed from a random starting point.\n The first MLE always uses the specified starting point (theta0),\n the next starting points are picked at random according to an\n exponential distribution (log-uniform on [thetaL, thetaU]).\n Default does not use random starting point (random_start = 1).\n\n random_state : int, RandomState instance or None, optional (default=None)\n The generator used to shuffle the sequence of coordinates of theta in\n the Welch optimizer. If int, random_state is the seed used by the\n random number generator; If RandomState instance, random_state is the\n random number generator; If None, the random number generator is the\n RandomState instance used by `np.random`.\n\n Attributes\n ----------\n theta_ : array\n Specified theta OR the best set of autocorrelation parameters (the \\\n sought maximizer of the reduced likelihood function).\n\n reduced_likelihood_function_value_ : array\n The optimal reduced likelihood function value.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.gaussian_process import GaussianProcess\n >>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T\n >>> y = (X * np.sin(X)).ravel()\n >>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)\n >>> gp.fit(X, y) # doctest: +ELLIPSIS\n GaussianProcess(beta0=None...\n ...\n\n Notes\n -----\n The presentation implementation is based on a translation of the DACE\n Matlab toolbox, see reference [NLNS2002]_.\n\n References\n ----------\n\n .. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.\n Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)\n http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf\n\n .. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,\n and M.D. Morris (1992). Screening, predicting, and computer\n experiments. Technometrics, 34(1) 15--25.`\n http://www.jstor.org/stable/1269548\n \"\"\"\n\n _regression_types = {\n 'constant': regression.constant,\n 'linear': regression.linear,\n 'quadratic': regression.quadratic}\n\n _correlation_types = {\n 'absolute_exponential': correlation.absolute_exponential,\n 'squared_exponential': correlation.squared_exponential,\n 'generalized_exponential': correlation.generalized_exponential,\n 'cubic': correlation.cubic,\n 'linear': correlation.linear}\n\n _optimizer_types = [\n 'fmin_cobyla',\n 'Welch']\n\n def __init__(self, regr='constant', corr='squared_exponential', beta0=None,\n storage_mode='full', verbose=False, theta0=1e-1,\n thetaL=None, thetaU=None, optimizer='fmin_cobyla',\n random_start=1, normalize=True,\n nugget=10. 
* MACHINE_EPSILON, random_state=None):\n\n self.regr = regr\n self.corr = corr\n self.beta0 = beta0\n self.storage_mode = storage_mode\n self.verbose = verbose\n self.theta0 = theta0\n self.thetaL = thetaL\n self.thetaU = thetaU\n self.normalize = normalize\n self.nugget = nugget\n self.optimizer = optimizer\n self.random_start = random_start\n self.random_state = random_state\n\n def fit(self, X, y):\n \"\"\"\n The Gaussian Process model fitting method.\n\n Parameters\n ----------\n X : double array_like\n An array with shape (n_samples, n_features) with the input at which\n observations were made.\n\n y : double array_like\n An array with shape (n_samples, ) or shape (n_samples, n_targets)\n with the observations of the output to be predicted.\n\n Returns\n -------\n gp : self\n A fitted Gaussian Process model object awaiting data to perform\n predictions.\n \"\"\"\n # Run input checks\n self._check_params()\n\n self.random_state = check_random_state(self.random_state)\n\n # Force data to 2D numpy.array\n X, y = check_X_y(X, y, multi_output=True, y_numeric=True)\n self.y_ndim_ = y.ndim\n if y.ndim == 1:\n y = y[:, np.newaxis]\n\n # Check shapes of DOE & observations\n n_samples, n_features = X.shape\n _, n_targets = y.shape\n\n # Run input checks\n self._check_params(n_samples)\n\n # Normalize data or don't\n if self.normalize:\n X_mean = np.mean(X, axis=0)\n X_std = np.std(X, axis=0)\n y_mean = np.mean(y, axis=0)\n y_std = np.std(y, axis=0)\n X_std[X_std == 0.] = 1.\n y_std[y_std == 0.] = 1.\n # center and scale X if necessary\n X = (X - X_mean) / X_std\n y = (y - y_mean) / y_std\n else:\n X_mean = np.zeros(1)\n X_std = np.ones(1)\n y_mean = np.zeros(1)\n y_std = np.ones(1)\n\n # Calculate matrix of distances D between samples\n D, ij = l1_cross_distances(X)\n if (np.min(np.sum(D, axis=1)) == 0. and self.corr != correlation.pure_nugget): # noqa\n raise Exception(\"Multiple input features cannot have the same\"\n \" target value.\")\n\n # Regression matrix and parameters\n F = self.regr(X)\n n_samples_F = F.shape[0]\n if F.ndim > 1:\n p = F.shape[1]\n else:\n p = 1\n if n_samples_F != n_samples:\n raise Exception(\"Number of rows in F and X do not match. Most \"\n \"likely something is going wrong with the \"\n \"regression model.\")\n if p > n_samples_F:\n raise Exception((\"Ordinary least squares problem is undetermined \"\n \"n_samples=%d must be greater than the \"\n \"regression model size p=%d.\") % (n_samples, p))\n if self.beta0 is not None:\n if self.beta0.shape[0] != p:\n raise Exception(\"Shapes of beta0 and F do not match.\")\n\n # Set attributes\n self.X = X\n self.y = y\n self.D = D\n self.ij = ij\n self.F = F\n self.X_mean, self.X_std = X_mean, X_std\n self.y_mean, self.y_std = y_mean, y_std\n\n # Determine Gaussian Process model parameters\n if self.thetaL is not None and self.thetaU is not None:\n # Maximum Likelihood Estimation of the parameters\n if self.verbose:\n print(\"Performing Maximum Likelihood Estimation of the \"\n \"autocorrelation parameters...\")\n self.theta_, self.reduced_likelihood_function_value_, par = \\\n self._arg_max_reduced_likelihood_function()\n if np.isinf(self.reduced_likelihood_function_value_):\n raise Exception(\"Bad parameter region. \"\n \"Try increasing upper bound\")\n\n else:\n # Given parameters\n if self.verbose:\n print(\"Given autocorrelation parameters. 
\"\n \"Computing Gaussian Process model parameters...\")\n self.theta_ = self.theta0\n self.reduced_likelihood_function_value_, par = \\\n self.reduced_likelihood_function()\n if np.isinf(self.reduced_likelihood_function_value_):\n raise Exception(\"Bad point. Try increasing theta0.\")\n\n self.beta = par['beta']\n self.gamma = par['gamma']\n self.sigma2 = par['sigma2']\n self.C = par['C']\n self.Ft = par['Ft']\n self.G = par['G']\n\n if self.storage_mode == 'light':\n # Delete heavy data (it will be computed again if required)\n # (it is required only when MSE is wanted in self.predict)\n if self.verbose:\n print(\"Light storage mode specified. \"\n \"Flushing autocorrelation matrix...\")\n self.D = None\n self.ij = None\n self.F = None\n self.C = None\n self.Ft = None\n self.G = None\n\n return self\n\n def predict(self, X, eval_MSE=False, batch_size=None):\n \"\"\"\n This function evaluates the Gaussian Process model at x.\n\n Parameters\n ----------\n X : array_like\n An array with shape (n_eval, n_features) giving the point(s) at\n which the prediction(s) should be made.\n\n eval_MSE : boolean, optional\n A boolean specifying whether the Mean Squared Error should be\n evaluated or not.\n Default assumes evalMSE = False and evaluates only the BLUP (mean\n prediction).\n\n batch_size : integer, optional\n An integer giving the maximum number of points that can be\n evaluated simultaneously (depending on the available memory).\n Default is None so that all given points are evaluated at the same\n time.\n\n Returns\n -------\n y : array_like, shape (n_samples, ) or (n_samples, n_targets)\n An array with shape (n_eval, ) if the Gaussian Process was trained\n on an array of shape (n_samples, ) or an array with shape\n (n_eval, n_targets) if the Gaussian Process was trained on an array\n of shape (n_samples, n_targets) with the Best Linear Unbiased\n Prediction at x.\n\n MSE : array_like, optional (if eval_MSE == True)\n An array with shape (n_eval, ) or (n_eval, n_targets) as with y,\n with the Mean Squared Error at x.\n \"\"\"\n check_is_fitted(self, \"X\")\n\n # Check input shapes\n X = check_array(X)\n n_eval, _ = X.shape\n n_samples, n_features = self.X.shape\n n_samples_y, n_targets = self.y.shape\n\n # Run input checks\n self._check_params(n_samples)\n\n if X.shape[1] != n_features:\n raise ValueError((\"The number of features in X (X.shape[1] = %d) \"\n \"should match the number of features used \"\n \"for fit() \"\n \"which is %d.\") % (X.shape[1], n_features))\n\n if batch_size is None:\n # No memory management\n # (evaluates all given points in a single batch run)\n\n # Normalize input\n X = (X - self.X_mean) / self.X_std\n\n # Initialize output\n y = np.zeros(n_eval)\n if eval_MSE:\n MSE = np.zeros(n_eval)\n\n # Get pairwise componentwise L1-distances to the input training set\n dx = manhattan_distances(X, Y=self.X, sum_over_features=False)\n # Get regression function and correlation\n f = self.regr(X)\n r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)\n\n # Scaled predictor\n y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)\n\n # Predictor\n y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)\n\n if self.y_ndim_ == 1:\n y = y.ravel()\n\n # Mean Squared Error\n if eval_MSE:\n C = self.C\n if C is None:\n # Light storage mode (need to recompute C, F, Ft and G)\n if self.verbose:\n print(\"This GaussianProcess used 'light' storage mode \"\n \"at instantiation. 
Need to recompute \"\n \"autocorrelation matrix...\")\n reduced_likelihood_function_value, par = \\\n self.reduced_likelihood_function()\n self.C = par['C']\n self.Ft = par['Ft']\n self.G = par['G']\n\n rt = linalg.solve_triangular(self.C, r.T, lower=True)\n\n if self.beta0 is None:\n # Universal Kriging\n u = linalg.solve_triangular(self.G.T,\n np.dot(self.Ft.T, rt) - f.T,\n lower=True)\n else:\n # Ordinary Kriging\n u = np.zeros((n_targets, n_eval))\n\n MSE = np.dot(self.sigma2.reshape(n_targets, 1),\n (1. - (rt ** 2.).sum(axis=0)\n + (u ** 2.).sum(axis=0))[np.newaxis, :])\n MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)\n\n # Mean Squared Error might be slightly negative depending on\n # machine precision: force to zero!\n MSE[MSE < 0.] = 0.\n\n if self.y_ndim_ == 1:\n MSE = MSE.ravel()\n\n return y, MSE\n\n else:\n\n return y\n\n else:\n # Memory management\n\n if type(batch_size) is not int or batch_size <= 0:\n raise Exception(\"batch_size must be a positive integer\")\n\n if eval_MSE:\n\n y, MSE = np.zeros(n_eval), np.zeros(n_eval)\n for k in range(max(1, int(n_eval / batch_size))):\n batch_from = k * batch_size\n batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])\n y[batch_from:batch_to], MSE[batch_from:batch_to] = \\\n self.predict(X[batch_from:batch_to],\n eval_MSE=eval_MSE, batch_size=None)\n\n return y, MSE\n\n else:\n\n y = np.zeros(n_eval)\n for k in range(max(1, int(n_eval / batch_size))):\n batch_from = k * batch_size\n batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])\n y[batch_from:batch_to] = \\\n self.predict(X[batch_from:batch_to],\n eval_MSE=eval_MSE, batch_size=None)\n\n return y\n\n def reduced_likelihood_function(self, theta=None):\n \"\"\"\n This function determines the BLUP parameters and evaluates the reduced\n likelihood function for the given autocorrelation parameters theta.\n\n Maximizing this function wrt the autocorrelation parameters theta is\n equivalent to maximizing the likelihood of the assumed joint Gaussian\n distribution of the observations y evaluated onto the design of\n experiments X.\n\n Parameters\n ----------\n theta : array_like, optional\n An array containing the autocorrelation parameters at which the\n Gaussian Process model parameters should be determined.\n Default uses the built-in autocorrelation parameters\n (ie ``theta = self.theta_``).\n\n Returns\n -------\n reduced_likelihood_function_value : double\n The value of the reduced likelihood function associated to the\n given autocorrelation parameters theta.\n\n par : dict\n A dictionary containing the requested Gaussian Process model\n parameters:\n\n - ``sigma2`` is the Gaussian Process variance.\n - ``beta`` is the generalized least-squares regression weights for\n Universal Kriging or given beta0 for Ordinary Kriging.\n - ``gamma`` is the Gaussian Process weights.\n - ``C`` is the Cholesky decomposition of the correlation\n matrix [R].\n - ``Ft`` is the solution of the linear equation system\n [R] x Ft = F\n - ``G`` is the QR decomposition of the matrix Ft.\n \"\"\"\n check_is_fitted(self, \"X\")\n\n if theta is None:\n # Use built-in autocorrelation parameters\n theta = self.theta_\n\n # Initialize output\n reduced_likelihood_function_value = - np.inf\n par = {}\n\n # Retrieve data\n n_samples = self.X.shape[0]\n D = self.D\n ij = self.ij\n F = self.F\n\n if D is None:\n # Light storage mode (need to recompute D, ij and F)\n D, ij = l1_cross_distances(self.X)\n if (np.min(np.sum(D, axis=1)) == 0.\n and self.corr != correlation.pure_nugget):\n raise 
Exception(\"Multiple X are not allowed\")\n F = self.regr(self.X)\n\n # Set up R\n r = self.corr(theta, D)\n R = np.eye(n_samples) * (1. + self.nugget)\n R[ij[:, 0], ij[:, 1]] = r\n R[ij[:, 1], ij[:, 0]] = r\n\n # Cholesky decomposition of R\n try:\n C = linalg.cholesky(R, lower=True)\n except linalg.LinAlgError:\n return reduced_likelihood_function_value, par\n\n # Get generalized least squares solution\n Ft = linalg.solve_triangular(C, F, lower=True)\n Q, G = linalg.qr(Ft, mode='economic')\n\n sv = linalg.svd(G, compute_uv=False)\n rcondG = sv[-1] / sv[0]\n if rcondG < 1e-10:\n # Check F\n sv = linalg.svd(F, compute_uv=False)\n condF = sv[0] / sv[-1]\n if condF > 1e15:\n raise Exception(\"F is too ill conditioned. Poor combination \"\n \"of regression model and observations.\")\n else:\n # Ft is too ill conditioned, get out (try different theta)\n return reduced_likelihood_function_value, par\n\n Yt = linalg.solve_triangular(C, self.y, lower=True)\n if self.beta0 is None:\n # Universal Kriging\n beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))\n else:\n # Ordinary Kriging\n beta = np.array(self.beta0)\n\n rho = Yt - np.dot(Ft, beta)\n sigma2 = (rho ** 2.).sum(axis=0) / n_samples\n # The determinant of R is equal to the squared product of the diagonal\n # elements of its Cholesky decomposition C\n detR = (np.diag(C) ** (2. / n_samples)).prod()\n\n # Compute/Organize output\n reduced_likelihood_function_value = - sigma2.sum() * detR\n par['sigma2'] = sigma2 * self.y_std ** 2.\n par['beta'] = beta\n par['gamma'] = linalg.solve_triangular(C.T, rho)\n par['C'] = C\n par['Ft'] = Ft\n par['G'] = G\n\n return reduced_likelihood_function_value, par\n\n def _arg_max_reduced_likelihood_function(self):\n \"\"\"\n This function estimates the autocorrelation parameters theta as the\n maximizer of the reduced likelihood function.\n (Minimization of the opposite reduced likelihood function is used for\n convenience)\n\n Parameters\n ----------\n self : All parameters are stored in the Gaussian Process model object.\n\n Returns\n -------\n optimal_theta : array_like\n The best set of autocorrelation parameters (the sought maximizer of\n the reduced likelihood function).\n\n optimal_reduced_likelihood_function_value : double\n The optimal reduced likelihood function value.\n\n optimal_par : dict\n The BLUP parameters associated to thetaOpt.\n \"\"\"\n\n # Initialize output\n best_optimal_theta = []\n best_optimal_rlf_value = []\n best_optimal_par = []\n\n if self.verbose:\n print(\"The chosen optimizer is: \" + str(self.optimizer))\n if self.random_start > 1:\n print(str(self.random_start) + \" random starts are required.\")\n\n percent_completed = 0.\n\n # Force optimizer to fmin_cobyla if the model is meant to be isotropic\n if self.optimizer == 'Welch' and self.theta0.size == 1:\n self.optimizer = 'fmin_cobyla'\n\n if self.optimizer == 'fmin_cobyla':\n\n def minus_reduced_likelihood_function(log10t):\n return - self.reduced_likelihood_function(\n theta=10. 
** log10t)[0]\n\n constraints = []\n for i in range(self.theta0.size):\n constraints.append(lambda log10t, i=i:\n log10t[i] - np.log10(self.thetaL[0, i]))\n constraints.append(lambda log10t, i=i:\n np.log10(self.thetaU[0, i]) - log10t[i])\n\n for k in range(self.random_start):\n\n if k == 0:\n # Use specified starting point as first guess\n theta0 = self.theta0\n else:\n # Generate a random starting point log10-uniformly\n # distributed between bounds\n log10theta0 = (np.log10(self.thetaL)\n + self.random_state.rand(*self.theta0.shape)\n * np.log10(self.thetaU / self.thetaL))\n theta0 = 10. ** log10theta0\n\n # Run Cobyla\n try:\n log10_optimal_theta = \\\n optimize.fmin_cobyla(minus_reduced_likelihood_function,\n np.log10(theta0).ravel(),\n constraints)\n except ValueError as ve:\n print(\"Optimization failed. Try increasing the ``nugget``\")\n raise ve\n\n optimal_theta = 10. ** log10_optimal_theta\n optimal_rlf_value, optimal_par = \\\n self.reduced_likelihood_function(theta=optimal_theta)\n\n # Compare the new optimizer to the best previous one\n if k > 0:\n if optimal_rlf_value > best_optimal_rlf_value:\n best_optimal_rlf_value = optimal_rlf_value\n best_optimal_par = optimal_par\n best_optimal_theta = optimal_theta\n else:\n best_optimal_rlf_value = optimal_rlf_value\n best_optimal_par = optimal_par\n best_optimal_theta = optimal_theta\n if self.verbose and self.random_start > 1:\n if (20 * k) / self.random_start > percent_completed:\n percent_completed = (20 * k) / self.random_start\n print(\"%s completed\" % (5 * percent_completed))\n\n optimal_rlf_value = best_optimal_rlf_value\n optimal_par = best_optimal_par\n optimal_theta = best_optimal_theta\n\n elif self.optimizer == 'Welch':\n\n # Backup of the given attributes\n theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU\n corr = self.corr\n verbose = self.verbose\n\n # This will iterate over fmin_cobyla optimizer\n self.optimizer = 'fmin_cobyla'\n self.verbose = False\n\n # Initialize under isotropy assumption\n if verbose:\n print(\"Initialize under isotropy assumption...\")\n self.theta0 = check_array(self.theta0.min())\n self.thetaL = check_array(self.thetaL.min())\n self.thetaU = check_array(self.thetaU.max())\n theta_iso, optimal_rlf_value_iso, par_iso = \\\n self._arg_max_reduced_likelihood_function()\n optimal_theta = theta_iso + np.zeros(theta0.shape)\n\n # Iterate over all dimensions of theta allowing for anisotropy\n if verbose:\n print(\"Now improving allowing for anisotropy...\")\n for i in self.random_state.permutation(theta0.size):\n if verbose:\n print(\"Proceeding along dimension %d...\" % (i + 1))\n self.theta0 = check_array(theta_iso)\n self.thetaL = check_array(thetaL[0, i])\n self.thetaU = check_array(thetaU[0, i])\n\n def corr_cut(t, d):\n return corr(check_array(np.hstack(\n [\n optimal_theta[0][0:i], t[0],\n optimal_theta[0][(i + 1)::]\n ]\n )), d)\n\n self.corr = corr_cut\n optimal_theta[0, i], optimal_rlf_value, optimal_par = \\\n self._arg_max_reduced_likelihood_function()\n\n # Restore the given attributes\n self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU\n self.corr = corr\n self.optimizer = 'Welch'\n self.verbose = verbose\n\n else:\n\n raise NotImplementedError(\"This optimizer ('%s') is not \"\n \"implemented yet. 
Please contribute!\"\n % self.optimizer)\n\n return optimal_theta, optimal_rlf_value, optimal_par\n\n def _check_params(self, n_samples=None):\n\n # Check regression model\n if not callable(self.regr):\n if self.regr in self._regression_types:\n self.regr = self._regression_types[self.regr]\n else:\n raise ValueError(\"regr should be one of %s or callable, \"\n \"%s was given.\"\n % (self._regression_types.keys(), self.regr))\n\n # Check regression weights if given (Ordinary Kriging)\n if self.beta0 is not None:\n self.beta0 = np.atleast_2d(self.beta0)\n if self.beta0.shape[1] != 1:\n # Force to column vector\n self.beta0 = self.beta0.T\n\n # Check correlation model\n if not callable(self.corr):\n if self.corr in self._correlation_types:\n self.corr = self._correlation_types[self.corr]\n else:\n raise ValueError(\"corr should be one of %s or callable, \"\n \"%s was given.\"\n % (self._correlation_types.keys(), self.corr))\n\n # Check storage mode\n if self.storage_mode != 'full' and self.storage_mode != 'light':\n raise ValueError(\"Storage mode should either be 'full' or \"\n \"'light', %s was given.\" % self.storage_mode)\n\n # Check correlation parameters\n self.theta0 = np.atleast_2d(self.theta0)\n lth = self.theta0.size\n\n if self.thetaL is not None and self.thetaU is not None:\n self.thetaL = np.atleast_2d(self.thetaL)\n self.thetaU = np.atleast_2d(self.thetaU)\n if self.thetaL.size != lth or self.thetaU.size != lth:\n raise ValueError(\"theta0, thetaL and thetaU must have the \"\n \"same length.\")\n if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):\n raise ValueError(\"The bounds must satisfy O < thetaL <= \"\n \"thetaU.\")\n\n elif self.thetaL is None and self.thetaU is None:\n if np.any(self.theta0 <= 0):\n raise ValueError(\"theta0 must be strictly positive.\")\n\n elif self.thetaL is None or self.thetaU is None:\n raise ValueError(\"thetaL and thetaU should either be both or \"\n \"neither specified.\")\n\n # Force verbose type to bool\n self.verbose = bool(self.verbose)\n\n # Force normalize type to bool\n self.normalize = bool(self.normalize)\n\n # Check nugget value\n self.nugget = np.asarray(self.nugget)\n if np.any(self.nugget) < 0.:\n raise ValueError(\"nugget must be positive or zero.\")\n if (n_samples is not None\n and self.nugget.shape not in [(), (n_samples,)]):\n raise ValueError(\"nugget must be either a scalar \"\n \"or array of length n_samples.\")\n\n # Check optimizer\n if self.optimizer not in self._optimizer_types:\n raise ValueError(\"optimizer should be one of %s\"\n % self._optimizer_types)\n\n # Force random_start type to int\n self.random_start = int(self.random_start)" }, { "identifier": "UtilityFunction", "path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py", "snippet": "class UtilityFunction(object):\n \"\"\"\n An object to compute the acquisition functions.\n \"\"\"\n\n def __init__(self, kind, kappa, xi):\n \"\"\"\n If UCB is to be used, a constant kappa is needed.\n \"\"\"\n self.kappa = kappa\n self.xi = xi\n if kind not in ['ucb', 'ei', 'poi']:\n err = \"The utility function \" \\\n \"{} has not been implemented, \" \\\n \"please choose one of ucb, ei, or poi.\".format(kind)\n raise NotImplementedError(err)\n else:\n self.kind = kind\n\n def utility(self, x, gp, y_max):\n if self.kind == 'ucb':\n return self._ucb(x, gp, self.kappa)\n if self.kind == 'ei':\n return self._ei(x, gp, y_max, self.xi)\n if self.kind == 'poi':\n return self._poi(x, gp, y_max, self.xi)\n\n 
@staticmethod\n def _ucb(x, gp, kappa):\n mean, var = gp.predict(x, eval_MSE=True)\n return mean + kappa * np.sqrt(var)\n\n @staticmethod\n def _ei(x, gp, y_max, xi):\n mean, var = gp.predict(x, eval_MSE=True)\n\n # Avoid points with zero variance\n var = np.maximum(var, 1e-9 + 0 * var)\n\n z = (mean - y_max - xi) / np.sqrt(var)\n return (mean - y_max - xi) * norm.cdf(z) + np.sqrt(var) * norm.pdf(z)\n\n @staticmethod\n def _poi(x, gp, y_max, xi):\n mean, var = gp.predict(x, eval_MSE=True)\n\n # Avoid points with zero variance\n var = np.maximum(var, 1e-9 + 0 * var)\n\n z = (mean - y_max - xi) / np.sqrt(var)\n return norm.cdf(z)" }, { "identifier": "unique_rows", "path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py", "snippet": "def unique_rows(a):\n \"\"\"\n A functions to trim repeated rows that may appear when optimizing.\n This is necessary to avoid the sklearn GP object from breaking\n\n :param a: array to trim repeated rows from\n\n :return: mask of unique rows\n \"\"\"\n\n # Sort array and kep track of where things should go back to\n order = np.lexsort(a.T)\n reorder = np.argsort(order)\n\n a = a[order]\n diff = np.diff(a, axis=0)\n ui = np.ones(len(a), 'bool')\n ui[1:] = (diff != 0).any(axis=1)\n\n return ui[reorder]" }, { "identifier": "PrintLog", "path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py", "snippet": "class PrintLog(object):\n\n def __init__(self, params):\n\n self.ymax = None\n self.xmax = None\n self.params = params\n self.ite = 1\n\n self.start_time = datetime.now()\n self.last_round = datetime.now()\n\n # sizes of parameters name and all\n self.sizes = [max(len(ps), 7) for ps in params]\n\n # Sorted indexes to access parameters\n self.sorti = sorted(range(len(self.params)),\n key=self.params.__getitem__)\n\n def reset_timer(self):\n self.start_time = datetime.now()\n self.last_round = datetime.now()\n\n def print_header(self, initialization=True):\n\n if initialization:\n print(\"{}Initialization{}\".format(BColours.RED,\n BColours.ENDC))\n else:\n print(\"{}Bayesian Optimization{}\".format(BColours.RED,\n BColours.ENDC))\n\n print(BColours.BLUE + \"-\" * (29 + sum([s + 5 for s in self.sizes]))\n + BColours.ENDC)\n\n print(\"{0:>{1}}\".format(\"Step\", 5), end=\" | \")\n print(\"{0:>{1}}\".format(\"Time\", 6), end=\" | \")\n print(\"{0:>{1}}\".format(\"Value\", 10), end=\" | \")\n\n for index in self.sorti:\n print(\"{0:>{1}}\".format(self.params[index],\n self.sizes[index] + 2),\n end=\" | \")\n print('')\n\n def print_step(self, x, y, warning=False):\n\n print(\"{:>5d}\".format(self.ite), end=\" | \")\n\n m, s = divmod((datetime.now() - self.last_round).total_seconds(), 60)\n print(\"{:>02d}m{:>02d}s\".format(int(m), int(s)), end=\" | \")\n\n if self.ymax is None or self.ymax < y:\n self.ymax = y\n self.xmax = x\n print(\"{0}{2: >10.5f}{1}\".format(BColours.MAGENTA,\n BColours.ENDC,\n y),\n end=\" | \")\n\n for index in self.sorti:\n print(\"{0}{2: >{3}.{4}f}{1}\".format(BColours.GREEN,\n BColours.ENDC,\n x[index],\n self.sizes[index] + 2,\n min(self.sizes[index] - 3,\n 6 - 2)),\n end=\" | \")\n else:\n print(\"{: >10.5f}\".format(y), end=\" | \")\n for index in self.sorti:\n print(\"{0: >{1}.{2}f}\".format(x[index],\n self.sizes[index] + 2,\n min(self.sizes[index] - 3,\n 6 - 2)),\n end=\" | \")\n\n if warning:\n print(\"{}Warning: Test point chose at \"\n \"random due to repeated sample.{}\".format(BColours.RED,\n BColours.ENDC))\n\n 
print()\n\n self.last_round = datetime.now()\n self.ite += 1\n\n def print_summary(self):\n pass" } ]
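The context snippets above document both the legacy scikit-learn GaussianProcess estimator and the bayesian_opt helpers (UtilityFunction, unique_rows, PrintLog). As a reading aid, the following is a minimal, self-contained sketch of the three acquisition criteria implemented by UtilityFunction (UCB, EI, POI), written directly from the formulas shown in the snippet; the numeric inputs at the bottom are made up for illustration and are not taken from this dataset row.

import numpy as np
from scipy.stats import norm

def ucb(mean, var, kappa):
    # Upper confidence bound: mean + kappa * standard deviation
    return mean + kappa * np.sqrt(var)

def ei(mean, var, y_max, xi):
    # Expected improvement over the current best observation y_max
    var = np.maximum(var, 1e-9)  # guard against zero predictive variance
    z = (mean - y_max - xi) / np.sqrt(var)
    return (mean - y_max - xi) * norm.cdf(z) + np.sqrt(var) * norm.pdf(z)

def poi(mean, var, y_max, xi):
    # Probability of improvement over y_max
    var = np.maximum(var, 1e-9)
    z = (mean - y_max - xi) / np.sqrt(var)
    return norm.cdf(z)

# Illustrative values only (not from the dataset row)
mean, var, y_max = np.array([0.2]), np.array([0.05]), 0.3
print(ucb(mean, var, kappa=2.576), ei(mean, var, y_max, xi=0.0), poi(mean, var, y_max, xi=0.0))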
import numpy as np
from .helpers import GaussianProcess
from scipy.optimize import minimize
from .helpers import UtilityFunction, unique_rows, PrintLog
11,558
if self.verbose: self.plog.print_step(x, y_init[-1]) # Append any other points passed by the self.initialize method (these # also have a corresponding target value passed by the user). self.init_points += self.x_init # Append the target value of self.initialize method. y_init += self.y_init # Turn it into np array and store. self.X = np.asarray(self.init_points) self.Y = np.asarray(y_init) # Updates the flag self.initialized = True def explore(self, points_dict): """ Method to explore user defined points :param points_dict: :return: """ # Consistency check param_tup_lens = [] for key in self.keys: param_tup_lens.append(len(list(points_dict[key]))) if all([e == param_tup_lens[0] for e in param_tup_lens]): pass else: raise ValueError('The same number of initialization points ' 'must be entered for every parameter.') # Turn into list of lists all_points = [] for key in self.keys: all_points.append(points_dict[key]) # Take transpose of list self.init_points = list(map(list, zip(*all_points))) def initialize(self, points_dict): """ Method to introduce point for which the target function value is known :param points_dict: :return: """ for target in points_dict: self.y_init.append(target) all_points = [] for key in self.keys: all_points.append(points_dict[target][key]) self.x_init.append(all_points) def set_bounds(self, new_bounds): """ A method that allows changing the lower and upper searching bounds :param new_bounds: A dictionary with the parameter name and its new bounds """ # Update the internal object stored dict self.pbounds.update(new_bounds) # Loop through the all bounds and reset the min-max bound matrix for row, key in enumerate(self.pbounds.keys()): # Reset all entries, even if the same. self.bounds[row] = self.pbounds[key] def maximize(self, init_points=5, n_iter=25, acq='ei', kappa=2.576, xi=0.0, **gp_params): """ Main optimization method. Parameters ---------- :param init_points: Number of randomly chosen points to sample the target function before fitting the gp. :param n_iter: Total number of times the process is to repeated. Note that currently this methods does not have stopping criteria (due to a number of reasons), therefore the total number of points to be sampled must be specified. :param acq: Acquisition function to be used, defaults to Expected Improvement. :param gp_params: Parameters to be passed to the Scikit-learn Gaussian Process object Returns ------- :return: Nothing """ # Reset timer self.plog.reset_timer() # Set acquisition function
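The cropped_code above exposes the public API of the BayesianOptimization class (explore, initialize, set_bounds, maximize). A hedged usage sketch follows; the target function, bounds, and import path are hypothetical and only illustrate the call signatures visible in the snippet.

import numpy as np

# Hypothetical import path -- adjust to wherever the module shown above lives.
# from forecast_api.models.optimization.opt_algorithms.bayesian_opt import BayesianOptimization

def black_box(x, y):
    # Toy objective with a known maximum at (x, y) = (0, 1)
    return -x ** 2 - (y - 1) ** 2 + 1

pbounds = {'x': (-2, 2), 'y': (-3, 3)}
bo = BayesianOptimization(f=black_box, pbounds=pbounds, verbose=1)

# Optionally seed the search with user-chosen points (one list per parameter).
bo.explore({'x': [-1.0, 1.0], 'y': [0.0, 2.0]})

# Random initialization points followed by GP-guided iterations.
bo.maximize(init_points=5, n_iter=10, acq='ei', kappa=2.576, xi=0.0)

# res['max'] is initialized in __init__ and holds the best value/params found.
print(bo.res['max'])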
""" BAYESIAN OPTIMIZATION MODULE - Version 0.1.0 Created by Fernando Nogueira (fmfn). Available in - https://github.com/fmfn/BayesianOptimization """ __author__ = 'fmfn' def acq_max(ac, gp, y_max, bounds): """ A function to find the maximum of the acquisition function using the 'L-BFGS-B' method. Parameters ---------- :param ac: The acquisition function object that return its point-wise value. :param gp: A gaussian process fitted to the relevant data. :param y_max: The current maximum known value of the target function. :param bounds: The variables bounds to limit the search of the acq max. Returns ------- :return: x_max, The arg max of the acquisition function. """ # Start with the lower bound as the argmax x_max = bounds[:, 0] max_acq = None x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1], size=(100, bounds.shape[0])) for x_try in x_tries: # Find the minimum of minus the acquisition function res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max), x_try.reshape(1, -1), bounds=bounds, method="L-BFGS-B") # Store it if better than previous minimum(maximum). if max_acq is None or -res.fun >= max_acq: x_max = res.x max_acq = -res.fun # Clip output to make sure it lies within the bounds. Due to floating # point technicalities this is not always the case. return np.clip(x_max, bounds[:, 0], bounds[:, 1]) def matern52(theta, d): """ Matern 5/2 correlation model.:: theta, d --> r(theta, d) = (1+sqrt(5)*r + 5/3*r^2)*exp(-sqrt(5)*r) n where r = sqrt(sum (d_i)^2 / (theta_i)^2 ) i = 1 Parameters ---------- theta : array_like An array with shape 1 (isotropic) or n (anisotropic) giving the autocorrelation parameter(s). d : array_like An array with shape (n_eval, n_features) giving the componentwise distances between locations x and x' at which the correlation model should be evaluated. Returns ------- r : array_like An array with shape (n_eval, ) containing the values of the autocorrelation modle. """ theta = np.asarray(theta, dtype=np.float) d = np.asarray(d, dtype=np.float) if d.ndim > 1: n_features = d.shape[1] else: n_features = 1 if theta.size == 1: r = np.sqrt(np.sum(d ** 2, axis=1)) / theta[0] elif theta.size != n_features: raise ValueError("Length of theta must be 1 or %s" % n_features) else: r = np.sqrt(np.sum(d ** 2 / theta.reshape(1, n_features) ** 2, axis=1)) return (1 + np.sqrt(5) * r + 5 / 3. * r ** 2) * np.exp(-np.sqrt(5) * r) class BayesianOptimization(object): def __init__(self, f, pbounds, verbose=1): """ :param f: Function to be maximized. :param pbounds: Dictionary with parameters names as keys and a tuple with minimum and maximum values. :param verbose: Whether or not to print progress. """ # Store the original dictionary self.pbounds = pbounds # Get the name of the parameters self.keys = list(pbounds.keys()) # Find number of parameters self.dim = len(pbounds) # Create an array with parameters bounds self.bounds = [] for key in self.pbounds.keys(): self.bounds.append(self.pbounds[key]) self.bounds = np.asarray(self.bounds) # Some function to be optimized self.f = f # Initialization flag self.initialized = False # Initialization lists --- stores starting points before process begins self.init_points = [] self.x_init = [] self.y_init = [] # Numpy array place holders self.X = None self.Y = None # Counter of iterations self.i = 0 # Since scipy 0.16 passing lower and upper bound to theta seems to be # broken. However, there is a lot of development going on around GP # is scikit-learn. So I'll pick the easy route here and simple specify # only theta0. 
self.gp = GaussianProcess(corr=matern52, theta0=np.random.uniform(0.001, 0.05, self.dim), thetaL=1e-5 * np.ones(self.dim), thetaU=1e0 * np.ones(self.dim), random_start=30) # Utility Function placeholder self.util = None # PrintLog object self.plog = PrintLog(self.keys) # Output dictionary self.res = {} # Output dictionary self.res['max'] = {'max_val': None, 'max_params': None} self.res['all'] = {'values': [], 'params': []} # Verbose self.verbose = verbose def init(self, init_points): """ Initialization method to kick start the optimization process. It is a combination of points passed by the user, and randomly sampled ones. :param init_points: Number of random points to probe. """ # Generate random points rp = [np.random.uniform(x[0], x[1], size=init_points) for x in self.bounds] # Concatenate new random points to possible existing # points from self.explore method. self.init_points += list(map(list, zip(*rp))) # Create empty list to store the new values of the function y_init = [] # Evaluate target function at all initialization # points (random + explore) for x in self.init_points: y_init.append(self.f(**dict(zip(self.keys, x)))) if self.verbose: self.plog.print_step(x, y_init[-1]) # Append any other points passed by the self.initialize method (these # also have a corresponding target value passed by the user). self.init_points += self.x_init # Append the target value of self.initialize method. y_init += self.y_init # Turn it into np array and store. self.X = np.asarray(self.init_points) self.Y = np.asarray(y_init) # Updates the flag self.initialized = True def explore(self, points_dict): """ Method to explore user defined points :param points_dict: :return: """ # Consistency check param_tup_lens = [] for key in self.keys: param_tup_lens.append(len(list(points_dict[key]))) if all([e == param_tup_lens[0] for e in param_tup_lens]): pass else: raise ValueError('The same number of initialization points ' 'must be entered for every parameter.') # Turn into list of lists all_points = [] for key in self.keys: all_points.append(points_dict[key]) # Take transpose of list self.init_points = list(map(list, zip(*all_points))) def initialize(self, points_dict): """ Method to introduce point for which the target function value is known :param points_dict: :return: """ for target in points_dict: self.y_init.append(target) all_points = [] for key in self.keys: all_points.append(points_dict[target][key]) self.x_init.append(all_points) def set_bounds(self, new_bounds): """ A method that allows changing the lower and upper searching bounds :param new_bounds: A dictionary with the parameter name and its new bounds """ # Update the internal object stored dict self.pbounds.update(new_bounds) # Loop through the all bounds and reset the min-max bound matrix for row, key in enumerate(self.pbounds.keys()): # Reset all entries, even if the same. self.bounds[row] = self.pbounds[key] def maximize(self, init_points=5, n_iter=25, acq='ei', kappa=2.576, xi=0.0, **gp_params): """ Main optimization method. Parameters ---------- :param init_points: Number of randomly chosen points to sample the target function before fitting the gp. :param n_iter: Total number of times the process is to repeated. Note that currently this methods does not have stopping criteria (due to a number of reasons), therefore the total number of points to be sampled must be specified. :param acq: Acquisition function to be used, defaults to Expected Improvement. 
:param gp_params: Parameters to be passed to the Scikit-learn Gaussian Process object Returns ------- :return: Nothing """ # Reset timer self.plog.reset_timer() # Set acquisition function
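For readability, the ASCII correlation formula in the matern52 docstring above is the Matérn 5/2 model, which can be written as

r(\theta, d) = \left(1 + \sqrt{5}\,r + \tfrac{5}{3}\,r^{2}\right)\exp\!\left(-\sqrt{5}\,r\right),
\qquad
r = \sqrt{\sum_{i=1}^{n} \frac{d_i^{2}}{\theta_i^{2}}}

With an isotropic theta (size 1) the code divides the Euclidean distance by theta[0]; with an anisotropic theta it scales each feature separately, exactly as the two branches in matern52 show.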
self.util = UtilityFunction(kind=acq, kappa=kappa, xi=xi)
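The gold next_line above instantiates the acquisition utility. The row does not include the remainder of maximize(), but given the acq_max and UtilityFunction.utility signatures shown earlier, a utility object built this way is plausibly consumed as sketched below (fitted_gp, Y, and bounds are placeholders, not taken from the row):

# Hedged sketch only -- the continuation of maximize() is not part of this row.
util = UtilityFunction(kind='ei', kappa=2.576, xi=0.0)
# x_next = acq_max(ac=util.utility, gp=fitted_gp, y_max=Y.max(), bounds=bounds)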
1
2023-11-17 09:23:38+00:00
16k
OpenBMB/XAgent
command.py
[ { "identifier": "XAgentServerEnv", "path": "XAgentServer/application/core/envs.py", "snippet": "class XAgentServerEnv:\n \"\"\"\n XAgentServer environment variables\n if you change value of the environment variable, you need to restart \n the XAgentServer by running the following command:\n `python start_server.py`\n or start a unicorn server by yourself\n \"\"\"\n app = \"app:app\"\n prod: bool = config.get(\"PROD\", \"False\").lower() == \"true\"\n base_dir = \"XAgentServer\"\n use_redis: bool = False\n recorder_root_dir = \"running_records\"\n # you can set default_login with True,\n # use the default user \"admin\" with token \"xagent-admin\" to login,\n default_login: bool = True\n # only one XAgentServer can be set to check whether the interaction is running.\n check_running: bool = False\n host = \"0.0.0.0\"\n port = 8090\n debug = True\n reload = True\n workers = 1\n share_url = \"https://x-agent.net/api/conv/community\"\n\n class DB:\n \"\"\"\n database config\n \"\"\"\n use_db = True\n db_url = \"mysql+pymysql://root:xagent@localhost:3306/xagent\"\n\n class Redis:\n \"\"\"\n redis config\n \"\"\"\n use_redis = False\n redis_url = \"redis://localhost\"\n redis_host = \"localhost\"\n redis_port = 6379\n redis_db = 0\n redis_password = \"xagent\"\n\n # if you want to use email to send message,\n # you can set send_email to True and set\n # email_host,\n # email_port,\n # email_user,\n # email_password,\n # auth_server\n class Email:\n \"\"\"\n email config\n \"\"\"\n send_email = False\n email_host = \"\"\n email_port = 465\n email_user = \"\"\n email_password = \"\"\n auth_server = \"\"\n\n # if you want to use upload function,\n # you can set upload_dir to the path of the upload directory\n # and set upload_allowed_types of the allowed types\n class Upload:\n \"\"\"\n upload config\n \"\"\"\n upload_dir = \"XAgentServer/localstorage/upload\"\n if not os.path.exists(upload_dir):\n os.makedirs(upload_dir)\n upload_allowed_types = [\"image/png\", \"image/jpeg\",\n \"image/gif\", \"text/plain\",\n \"application/msword\", \"pdf\",\n \"txt\", \"pptx\", \"xlsx\",\n \"doc\", \"ppt\", \"xls\",\n \"zip\", \"rar\", \"tar\",\n \"gz\", \"7z\", \"bz2\",\n \"tgz\", \"tbz2\", \"tar.gz\",\n \"tar.bz2\"]" }, { "identifier": "SessionLocal", "path": "XAgentServer/database/connect.py", "snippet": "SQLALCHEMY_DATABASE_URL = os.getenv('MYSQL_DB_URL', XAgentServerEnv.DB.db_url)" }, { "identifier": "StatusEnum", "path": "XAgentServer/enums/status.py", "snippet": "class StatusEnum:\n \"\"\"XAgent Status Enum\n \"\"\"\n START = \"start\"\n SUBTASK = \"subtask\"\n REFINEMENT = \"refinement\"\n INNER = \"inner\"\n FINISHED = \"finished\"\n FAILED = \"failed\"\n SUBMIT = \"subtask_submit\"\n RUNNING = \"running\"\n ASK_FOR_HUMAN_HELP = \"ask_for_human_help\"\n CLOSED = \"closed\"" }, { "identifier": "XAgentError", "path": "XAgentServer/exts/exception_ext.py", "snippet": "class XAgentError(Exception):\n \"\"\"Base class for exceptions in this module.\"\"\"\n def __init__(self, message=\"XAgent Error!\"):\n self.message = message\n super().__init__(self.message)" }, { "identifier": "XAgentInteraction", "path": "XAgentServer/interaction.py", "snippet": "class XAgentInteraction(metaclass=abc.ABCMeta):\n \"\"\"\n XAgent 核心交互组件集, 引用: XAgentCE\n Attributes:\n base: 交互基本信息\n parameter: 交互参数\n interrupt: 是否包含中断\n toolserver: 工具服务\n call_method: 调用方式\n wait_seconds: 等待时间\n \n Components:\n logger: 日志\n db: 数据库\n recorder: 运行记录\n toolserver_interface: 工具服务接口\n \n 组件集中的所有组件全局唯一\n\n \"\"\"\n\n def __init__(\n self,\n 
base: InteractionBase,\n parameter: InteractionParameter,\n interrupt: bool = False,\n call_method: str = \"web\",\n wait_seconds: int = 600,\n ) -> None:\n self.base = base\n self.parameter = parameter\n # 唯一标识当前的执行步骤\n self.current_step = uuid.uuid4().hex\n self.logger = None\n self.interrupt = interrupt\n self.call_method = call_method\n self.wait_seconds = wait_seconds\n self.log_dir = os.path.join(\n os.path.join(XAgentServerEnv.base_dir,\n \"localstorage\",\n \"interact_records\"),\n datetime.now().strftime(\"%Y-%m-%d\"),\n self.base.interaction_id)\n self.human_data = None\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n self.extract_dir = os.path.join(self.log_dir, \"workspace\")\n if not os.path.exists(self.extract_dir):\n os.makedirs(self.extract_dir)\n\n self.db: Session = None\n self.toolserver_interface = None\n\n def register_toolserver_interface(self, toolserver_interface: ToolServerInterface):\n \"\"\"register tool server interface\"\"\"\n self.toolserver_interface = toolserver_interface\n\n def resister_logger(self, logger: Logger):\n \"\"\"\n 注册logger, 根据会话id创建日志文件夹, 并创建日志文件\n \"\"\"\n\n self.logger = logger\n self.logger.info(f\"init interaction: {self.base.interaction_id}\")\n\n def register_db(self, db: Session):\n \"\"\"\n 注册db\n\n Args:\n db: Session对象\n \"\"\"\n self.db = db\n\n def insert_data(self,\n data: dict,\n status=\"\",\n current: str = None,\n is_include_pictures: bool = False,):\n \"\"\"\n 更新缓存, 推送数据\n \"\"\"\n # check alive\n alive = redis.get_key(self.base.interaction_id)\n if alive == \"close\":\n self.logger.info(\"The user terminated this action and exited.\")\n exit(0)\n self.current_step = uuid.uuid4().hex\n\n if status == \"inner\":\n tool_name = data.get(\"using_tools\", {}).get(\n \"tool_name\", \"\") if isinstance(data, dict) else \"\"\n\n if tool_name == \"subtask_submit\":\n status = StatusEnum.SUBMIT\n\n # download workspace files\n self.download_files()\n\n file_list = os.listdir(self.extract_dir)\n\n # insert raw\n process = XAgentRaw(\n node_id=self.current_step,\n interaction_id=self.base.interaction_id,\n current=current,\n step=0,\n data=data,\n file_list=file_list,\n status=status,\n do_interrupt=self.interrupt,\n wait_seconds=0,\n ask_for_human_help=False,\n create_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n update_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n is_deleted=False,\n is_human=False,\n human_data=None,\n human_file_list=[],\n is_send=self.call_method != 'web',\n is_receive=False,\n include_pictures=is_include_pictures,\n )\n if status == StatusEnum.FINISHED:\n InteractionCRUD.update_interaction_status(\n db=self.db,\n interaction_id=self.base.interaction_id,\n status=StatusEnum.FINISHED,\n message=\"finished\",\n current_step=self.current_step)\n else:\n InteractionCRUD.update_interaction_status(\n db=self.db,\n interaction_id=self.base.interaction_id,\n status=\"running\",\n message=\"running\",\n current_step=self.current_step)\n InteractionCRUD.insert_raw(db=self.db, process=process)\n if self.call_method == \"web\":\n redis.set_key(self.base.interaction_id + \"_send\", 1)\n elif self.call_method == \"cmd\":\n # print workspace file list\n file_list_str = \", \".join(file_list) \n self.logger.typewriter_log(\n title=f\"-=-=-=-=-=-=-= {self.base.interaction_id}, {self.current_step}, WORKSPACE FILE LIST -=-=-=-=-=-=-=\\n\",\n title_color=Fore.GREEN,\n content=f\"[{file_list_str}] in {self.extract_dir}\"\n )\n\n def download_files(self):\n \"\"\"download files\n\n Returns:\n 
Boolean: True or False\n \"\"\"\n try:\n save_path = self.toolserver_interface.download_all_files()\n\n if os.path.exists(save_path):\n zip_file = zipfile.ZipFile(save_path)\n zip_list = zip_file.namelist() # 得到压缩包里所有文件\n for f in zip_list:\n zip_file.extract(f, self.extract_dir) # 循环解压文件到指定目录\n\n zip_file.close()\n return True\n except zipfile.BadZipFile:\n return False\n\n def receive(self, can_modify=None):\n \"\"\"\n 接收数据\n \"\"\"\n\n if self.call_method == \"web\":\n wait = 0\n while wait < self.wait_seconds:\n human_data = self.get_human_data()\n if human_data is not None:\n return human_data\n else:\n wait += 2\n time.sleep(2)\n\n raise XAgentTimeoutError(\"等待数据超时,关闭连接\")\n else:\n print(can_modify)\n\n def get_human_data(self):\n \"\"\"\n 获取人类数据\n \"\"\"\n # check alive, ensure the interaction is alive\n # if The user terminated this action and exited\n alive = redis.get_key(self.base.interaction_id)\n if alive == \"close\":\n self.logger.info(\"The user terminated this action and exited!\")\n exit(0)\n receive_key = self.base.interaction_id + \"_\" + self.current_step + \"_receive\"\n is_receive = redis.get_key(receive_key)\n\n if is_receive:\n raw = InteractionCRUD.get_raw(\n db=self.db, interaction_id=self.base.interaction_id, node_id=self.current_step)\n\n if raw and raw.is_human and raw.is_receive:\n redis.delete_key(receive_key)\n return raw.human_data\n\n return None\n\n def ask_for_human_help(self, data):\n \"\"\"调用工具时,请求人类帮助\n Execute the tool and ask for human help\n \"\"\"\n\n self.current_step = uuid.uuid4().hex\n self.download_files()\n file_list = os.listdir(self.extract_dir)\n # special: ask for human help and do interrupt\n # send data\n process = XAgentRaw(\n node_id=self.current_step,\n interaction_id=self.base.interaction_id,\n current=self.current_step,\n step=0,\n data=data,\n file_list=file_list,\n status=StatusEnum.ASK_FOR_HUMAN_HELP,\n do_interrupt=True,\n wait_seconds=0,\n ask_for_human_help=True,\n create_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n update_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n is_deleted=False,\n is_human=False,\n human_data=None,\n human_file_list=[],\n is_send=False,\n is_receive=False,\n include_pictures=False,\n )\n\n # insert into mysql\n InteractionCRUD.insert_raw(db=self.db, process=process)\n\n # set redis\n redis.set_key(self.base.interaction_id + \"_send\", 1)\n\n # set status\n\n InteractionCRUD.update_interaction_status(\n db=self.db,\n interaction_id=self.base.interaction_id,\n status=StatusEnum.ASK_FOR_HUMAN_HELP,\n message=\"ask for human help\",\n current_step=self.current_step)\n\n # check alive\n alive = redis.get_key(self.base.interaction_id)\n if alive == \"close\":\n self.logger.info(\"The user terminated this action and exited!\")\n exit(0)\n\n # wait for human data\n wait = 0\n while wait < self.wait_seconds:\n human_data = self.get_human_data()\n if human_data is not None:\n return human_data\n else:\n wait += 2\n time.sleep(2)\n\n raise XAgentTimeoutError(\"ASK-For-Human-Data: 等待数据超时,关闭连接\")" }, { "identifier": "Logger", "path": "XAgentServer/loggers/logs.py", "snippet": "class Logger(metaclass=abc.ABCMeta):\n \"\"\"\n Logger that handle titles in different colors.\n Outputs logs in console, activity.log, and errors.log\n For console handler: simulates typing\n \"\"\"\n\n def __init__(self, log_dir: str = None, log_name: str= \"\", log_file: str = \"activity.log\", error_file: str = \"errors.log\"):\n \"\"\"init\"\"\"\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n # create 
log directory if it doesn't exist\n self.log_name = time.strftime(\"%Y-%m-%d\", time.localtime()) if not log_name else log_name\n self.logger = logging.getLogger(self.log_name)\n console_formatter = RecordFormatter(\"%(title_color)s %(message)s\")\n\n # Create a handler for console which simulate typing\n self.typing_console_handler = TypingConsoleHandler()\n self.typing_console_handler.setLevel(logging.INFO)\n self.typing_console_handler.setFormatter(console_formatter)\n\n # Create a handler for console without typing simulation\n self.console_handler = ConsoleHandler()\n self.console_handler.setLevel(logging.DEBUG)\n self.console_handler.setFormatter(console_formatter)\n\n self.speak_mode = False\n self.chat_plugins = []\n\n # Info handler in activity.log\n self.file_handler = logging.FileHandler(\n os.path.join(log_dir, log_file), \"a\", \"utf-8\"\n )\n self.file_handler.setLevel(logging.DEBUG)\n info_formatter = RecordFormatter(\n \"%(asctime)s [%(threadName)s] %(levelname)s: %(title_color)s %(title)s %(message)s\"\n )\n self.file_handler.setFormatter(info_formatter)\n\n # Error handler error.log\n error_handler = logging.FileHandler(\n os.path.join(log_dir, error_file), \"a\", \"utf-8\"\n )\n error_handler.setLevel(logging.ERROR)\n error_formatter = RecordFormatter(\n \"%(asctime)s [%(threadName)s] %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title_color)s %(title)s\"\n \" %(message_no_color)s\"\n )\n error_handler.setFormatter(error_formatter)\n\n # self.typing_logger = logging.getLogger(self.log_name)\n # if not self.typing_logger.handlers:\n # self.typing_logger.addHandler(self.typing_console_handler)\n # self.typing_logger.addHandler(self.file_handler)\n # self.typing_logger.addHandler(error_handler)\n # self.typing_logger.setLevel(logging.DEBUG)\n\n if self.log_name.endswith(\"_INTERACT\") or not self.logger.handlers:\n # self.logger.addHandler(self.typing_console_handler)\n self.logger.addHandler(self.console_handler)\n self.logger.addHandler(error_handler)\n self.logger.addHandler(self.file_handler)\n self.logger.setLevel(logging.DEBUG)\n \n def typewriter_log(\n self, title=\"\", title_color=\"\", content=\"\", speak_text=False, level=logging.INFO\n ):\n # if speak_text and self.speak_mode:\n # say_text(f\"{title}. {content}\")\n\n for plugin in self.chat_plugins:\n plugin.report(f\"{title}. 
{content}\")\n\n if content:\n if isinstance(content, list):\n content = \" \".join(content)\n else:\n content = \"\"\n\n self.logger.log(\n level, content, extra={\"title\": title, \"color\": title_color}\n )\n\n def debug(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n self._log(title, title_color, message, logging.DEBUG)\n\n def info(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n self._log(title, title_color, message, logging.INFO)\n\n def warn(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n self._log(title, title_color, message, logging.WARN)\n\n def error(self, title, message=\"\"):\n self._log(title, Fore.RED, message, logging.ERROR)\n\n def _log(\n self,\n title: str = \"\",\n title_color: str = \"\",\n message: str = \"\",\n level=logging.INFO,\n ):\n if message:\n if isinstance(message, list):\n message = \" \".join(message)\n self.logger.log(\n level, message, extra={\"title\": str(title), \"color\": str(title_color)}\n )\n\n def set_level(self, level):\n self.logger.setLevel(level)\n self.typing_logger.setLevel(level)\n\n def double_check(self, additionalText=None):\n if not additionalText:\n additionalText = (\n \"Please ensure you've setup and configured everything\"\n \" correctly. Read https://github.com/Torantulino/Auto-GPT#readme to \"\n \"double check. You can also create a github issue or join the discord\"\n \" and ask there!\"\n )\n\n self.typewriter_log(\"DOUBLE CHECK CONFIGURATION\", Fore.YELLOW, additionalText)\n\n def log_json(self, data: Any, file_name: str) -> None:\n # Define log directory\n this_files_dir_path = os.path.dirname(__file__)\n log_dir = os.path.join(this_files_dir_path, \"../logs\")\n\n # Create a handler for JSON files\n json_file_path = os.path.join(log_dir, file_name)\n json_data_handler = JsonFileHandler(json_file_path)\n json_data_handler.setFormatter(JsonFormatter())\n\n # Log the JSON data using the custom file handler\n self.json_logger.addHandler(json_data_handler)\n self.json_logger.debug(data)\n self.json_logger.removeHandler(json_data_handler)\n\n def get_log_directory(self):\n this_files_dir_path = os.path.dirname(__file__)\n log_dir = os.path.join(this_files_dir_path, \"../logs\")\n return os.path.abspath(log_dir)" }, { "identifier": "InteractionBase", "path": "XAgentServer/models/interaction.py", "snippet": "class InteractionBase(metaclass=abc.ABCMeta):\n def __init__(self,\n interaction_id: str,\n user_id: str,\n create_time: str,\n description: str,\n agent: str = \"\",\n mode: str = \"\",\n file_list: list = [],\n recorder_root_dir: str = \"\",\n status: str = \"\",\n message: str = \"\",\n current_step: str = \"\",\n update_time: str = \"\",\n is_deleted: bool = False,\n call_method: str = \"web\",\n ):\n self.interaction_id = interaction_id\n self.user_id = user_id\n self.create_time = create_time\n self.description = description\n self.agent = agent\n self.mode = mode\n self.file_list = file_list\n self.recorder_root_dir = recorder_root_dir\n self.status = status\n self.message = message\n self.current_step = current_step\n self.update_time = update_time\n self.is_deleted = is_deleted\n self.call_method = call_method\n\n def to_dict(self, include=None, exclude=None):\n data = {\n \"interaction_id\": self.interaction_id,\n \"user_id\": self.user_id,\n \"create_time\": self.create_time,\n \"description\": self.description,\n \"agent\": self.agent,\n \"mode\": self.mode,\n \"file_list\": self.file_list,\n \"recorder_root_dir\": self.recorder_root_dir,\n \"status\": self.status,\n 
\"message\": self.message,\n \"current_step\": self.current_step,\n \"update_time\": self.update_time,\n \"is_deleted\": self.is_deleted,\n \"call_method\": self.call_method,\n }\n if include:\n data = {k: v for k, v in data.items() if k in include}\n if exclude:\n data = {k: v for k, v in data.items() if k not in exclude}\n return data\n \n def to_json(self):\n return json.dumps(self.to_dict(), indent=2, ensure_ascii=False)\n \n @classmethod\n def from_json(cls, json_data):\n return cls(**json_data)\n \n @classmethod\n def from_db(cls, interaction):\n return cls(interaction.interaction_id,\n interaction.user_id,\n interaction.create_time,\n interaction.description,\n interaction.agent,\n interaction.mode,\n interaction.file_list,\n interaction.recorder_root_dir,\n interaction.status,\n interaction.message,\n interaction.current_step,\n interaction.update_time,\n interaction.is_deleted,\n interaction.call_method,\n )" }, { "identifier": "InteractionParameter", "path": "XAgentServer/models/parameter.py", "snippet": "class InteractionParameter(metaclass=abc.ABCMeta):\n \"\"\"\n 交互参数\n \"\"\"\n\n def __init__(self,\n interaction_id: str,\n parameter_id: str,\n args: Union[str, dict, None] = None\n ):\n self.interaction_id = interaction_id\n self.args = args\n self.parameter_id = parameter_id\n\n def to_dict(self):\n return {\n \"interaction_id\": self.interaction_id,\n \"parameter_id\": self.parameter_id,\n \"args\": self.args,\n }\n\n def to_json(self):\n return json.dumps(self.to_dict(), indent=2, ensure_ascii=False)\n\n @classmethod\n def from_json(cls, json_data):\n return cls(**json_data)\n \n @classmethod\n def from_db(cls, interaction):\n return cls(interaction.interaction_id,\n interaction.parameter_id,\n interaction.args\n )" }, { "identifier": "XAgentRaw", "path": "XAgentServer/models/raw.py", "snippet": "class XAgentRaw(metaclass=abc.ABCMeta):\n \"\"\"XAgent Raw Object\"\"\"\n\n def __init__(self, node_id: str,\n interaction_id: str,\n current: str,\n step: int,\n data: dict,\n file_list: list,\n status: str,\n do_interrupt: bool,\n wait_seconds: int,\n ask_for_human_help: bool,\n create_time: str,\n update_time: str,\n is_deleted: bool,\n is_human: bool,\n human_data: dict,\n human_file_list: list,\n is_send: bool,\n is_receive: bool,\n include_pictures: bool = False,):\n self.node_id = node_id\n self.interaction_id = interaction_id\n self.current = current\n self.step = step\n self.data = data\n self.file_list = file_list\n self.status = status\n self.do_interrupt = do_interrupt\n self.wait_seconds = wait_seconds\n self.ask_for_human_help = ask_for_human_help\n self.create_time = create_time\n self.update_time = update_time\n self.is_deleted = is_deleted\n self.is_human = is_human\n self.human_data = human_data\n self.human_file_list = human_file_list\n self.is_send = is_send\n self.is_receive = is_receive\n self.include_pictures = include_pictures\n\n def to_dict(self):\n \"\"\"XAgent Raw Object to dict\"\"\"\n return {\n \"node_id\": self.node_id,\n \"interaction_id\": self.interaction_id,\n \"current\": self.current,\n \"step\": self.step,\n \"data\": self.data,\n \"file_list\": self.file_list,\n \"status\": self.status,\n \"do_interrupt\": self.do_interrupt,\n \"wait_seconds\": self.wait_seconds,\n \"ask_for_human_help\": self.ask_for_human_help,\n \"create_time\": self.create_time,\n \"update_time\": self.update_time,\n \"is_deleted\": self.is_deleted,\n \"is_human\": self.is_human,\n \"human_data\": self.human_data,\n \"human_file_list\": self.human_file_list,\n 
\"is_send\": self.is_send,\n \"is_receive\": self.is_receive,\n \"include_pictures\": self.include_pictures\n }\n\n def to_json(self):\n \"\"\"XAgent Raw Object to json\"\"\"\n return json.dumps(self.to_dict(), indent=2, ensure_ascii=False)\n\n @classmethod\n def from_json(cls, json_data):\n \"\"\"XAgent Raw Object from json\"\"\"\n return cls(**json_data)\n\n def update(self, update_data: dict):\n \"\"\"XAgent Raw Object update\"\"\"\n for k, v in update_data.items():\n setattr(self, k, v)\n return self\n\n @classmethod\n def from_db(cls, db_data):\n \"\"\"XAgent Raw Object from db\"\"\"\n return cls(\n node_id=db_data.node_id,\n interaction_id=db_data.interaction_id,\n current=db_data.current,\n step=db_data.step,\n data=db_data.data,\n file_list=db_data.file_list,\n status=db_data.status,\n do_interrupt=db_data.do_interrupt,\n wait_seconds=db_data.wait_seconds,\n ask_for_human_help=db_data.ask_for_human_help,\n create_time=db_data.create_time,\n update_time=db_data.update_time,\n is_deleted=db_data.is_deleted,\n is_human=db_data.is_human,\n human_data=db_data.human_data,\n human_file_list=db_data.human_file_list,\n is_send=db_data.is_send,\n is_receive=db_data.is_receive,\n include_pictures=db_data.include_pictures\n )" }, { "identifier": "XAgentServer", "path": "XAgentServer/server.py", "snippet": "class XAgentServer:\n \"\"\"XAgent Server Start Class\n \"\"\"\n\n def __init__(self, logger: Logger) -> None:\n self.logger: Logger = logger\n\n def interact(self, interaction: XAgentInteraction):\n # query = message\n \"\"\"\n XAgent Server Start Function\n \"\"\"\n from XAgent.config import CONFIG as config\n xagent_core = None\n try:\n config.reload()\n args = {}\n # args\n args = interaction.parameter.args\n\n self.logger.info(\n f\"server is running, the start query is {args.get('goal', '')}\")\n xagent_param = XAgentParam()\n\n # build query\n xagent_param.build_query({\n \"role_name\": \"Assistant\",\n \"task\": args.get(\"goal\", \"\"),\n \"plan\": args.get(\"plan\", [\"Pay attention to the language in initial goal, always answer with the same language of the initial goal given.\"]),\n })\n xagent_param.build_config(config)\n xagent_core = XAgentCoreComponents()\n # build XAgent Core Components\n xagent_core.build(xagent_param, interaction=interaction)\n json_str = json.dumps(\n xagent_param.config.to_dict(), indent=2)\n json_str=re.sub(r'\"api_key\": \"(.+?)\"', r'\"api_key\": \"**\"', json_str)\n self.logger.info(json_str)\n self.logger.typewriter_log(\n \"Human-In-The-Loop\",\n Fore.RED,\n str(xagent_param.config.enable_ask_human_for_help),\n )\n\n file_list = interaction.base.file_list\n for file in file_list:\n file_uuid = file.get(\"uuid\", \"\")\n file_name = file.get(\"name\", \"\")\n if file_uuid.startswith(\"/\"):\n file_path = file_uuid\n else:\n file_path = os.path.join(XAgentServerEnv.Upload.upload_dir,\n interaction.base.user_id, file_uuid)\n\n upload_dir = os.path.join(\n xagent_core.base_dir, \"upload\")\n if not os.path.exists(upload_dir):\n os.makedirs(upload_dir)\n # 拷贝到workspace\n if interaction.call_method == \"web\":\n shutil.copy(file_path, os.path.join(upload_dir, file_name))\n else:\n if os.path.exists(file_path):\n if os.path.samefile(file_path, os.path.join(upload_dir, file_name)):\n # 文件路径相同,跳过复制\n pass\n else:\n shutil.copy(file_path, os.path.join(upload_dir, file_name))\n # shutil.copy(file_path, os.path.join(upload_dir, file_name))\n\n new_file = os.path.join(upload_dir, file_name)\n try:\n xagent_core.toolserver_interface.upload_file(new_file)\n 
except Exception as e:\n self.logger.typewriter_log(\n \"Error happens when uploading file\",\n Fore.RED,\n f\"{new_file}\\n{e}\",\n )\n raise XAgentUploadFileError(str(e)) from e\n\n task_handler = TaskHandler(xagent_core=xagent_core,\n xagent_param=xagent_param)\n self.logger.info(\"Start outer loop async\")\n task_handler.outer_loop()\n except Exception as e:\n raise XAgentRunningError(str(e)) from e\n finally:\n if xagent_core is not None:\n xagent_core.close()" }, { "identifier": "InteractionCRUD", "path": "XAgentServer/application/cruds/interaction.py", "snippet": "class InteractionCRUD(metaclass=abc.ABCMeta):\n \"\"\"\n interaction crud\n \"\"\"\n\n @classmethod\n def search_many_interaction(cls, db: Session) -> list:\n \"\"\"\n search many interaction\n \"\"\"\n try:\n return InteractionDBInterface.search_many_interaction(db=db)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_interaction(cls, db: Session, interaction_id: str) -> InteractionBase | None:\n \"\"\"\n get interaction\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n interaction InteractionBase\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_interaction(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def create_interaction(cls, db: Session, base: InteractionBase):\n \"\"\"\n create interaction\n Args:\n db: db\n base: base\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.create_interaction(db=db, base=base)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n \n @classmethod\n def get_ready_interaction(cls, db: Session, user_id: str):\n \"\"\"\n create interaction\n Args:\n db: db\n user_id: user_id\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_ready_interaction(db=db, user_id=user_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n\n @classmethod\n def add_parameter(cls, db: Session, parameter: InteractionParameter = None):\n \"\"\"\n add parameter\n Args:\n db: db\n parameter: parameter\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.add_parameter(db=db, parameter=parameter)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_parameter(cls, db: Session, interaction_id: str) -> list:\n \"\"\"\n get parameter\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n parameter list [InteractionParameter]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_parameter(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n \n \n @classmethod\n def get_init_parameter(cls, db: Session, interaction_id: str) -> InteractionParameter:\n \"\"\"\n get init parameter\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n parameter InteractionParameter\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n parameters = InteractionDBInterface.get_parameter(db=db, interaction_id=interaction_id)\n init_parameter = parameters[0]\n parameter = InteractionParameter.from_json({\"args\": init_parameter, 
\"interaction_id\": interaction_id, \"parameter_id\": None})\n return parameter\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def search_interaction_by_user_id(cls,\n db: Session,\n user_id: str,\n page_size: int = 10,\n page_num: int = 1) -> list[dict]:\n \"\"\"\n get interaction by user id\n Args:\n db: db\n user_id: user id\n page_size: page size\n page_num: page num\n Returns:\n interaction list [dict]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n return InteractionDBInterface.search_interaction_by_user_id(db=db,\n user_id=user_id,\n page_size=page_size,\n page_num=page_num)\n\n @classmethod\n def is_exist(cls, db: Session, interaction_id: str) -> bool:\n \"\"\"\n interaction is exist\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n True if interaction is exist, else False\n \n Raises:\n XAgentDBError: XAgent DB Error \n \"\"\"\n try:\n return InteractionDBInterface.is_exist(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_interaction(cls, db: Session, base_data: dict):\n \"\"\"\n update interaction\n Args:\n db: db\n base_data: base data\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.update_interaction(db=db, base_data=base_data)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_interaction_status(cls,\n db: Session,\n interaction_id: str,\n status: str,\n message: str,\n current_step: int):\n \"\"\"\n update interaction status\n Args:\n db: db\n interaction_id: interaction id\n status: status\n message: message\n current_step: current step\n \n Raises:\n XAgentDBError: XAgent DB Error \n \"\"\"\n try:\n InteractionDBInterface.update_interaction_status(\n db=db,\n interaction_id=interaction_id,\n status=status,\n message=message,\n current_step=current_step)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_interaction_parameter(cls,\n db: Session,\n interaction_id: str,\n parameter: InteractionParameter):\n \"\"\"\n update interaction parameter\n Args:\n db: db\n interaction_id: interaction id\n parameter: parameter\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.update_interaction_parameter(\n db=db,\n interaction_id=interaction_id,\n parameter=parameter)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def is_running(cls, db: Session, user_id: str):\n \"\"\"\n is running\n Args:\n db: db\n user_id: user id\n Returns:\n True if running, else False\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.is_running(db=db, user_id=user_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def delete_interaction(cls, db: Session, interaction_id: str):\n \"\"\"\n delete interaction\n Args:\n db: db\n interaction_id: interaction id\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.delete_interaction(\n db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_shared_interaction(cls,\n db: Session,\n 
interaction_id: str) -> InteractionBase | None:\n \"\"\"\n get shared interaction\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n interaction InteractionBase, if not found, return None\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_shared_interaction(\n db=db,\n interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def search_many_shared(cls,\n db: Session,\n page_size: int = 20,\n page_index: int = 1) -> list[dict]:\n \"\"\"\n search many shared\n Args:\n db: db\n page_size: page size\n page_index: page index\n Returns:\n interaction list [dict]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.search_many_shared(db=db,\n page_size=page_size,\n page_index=page_index)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def insert_raw(cls, db: Session, process: XAgentRaw):\n \"\"\"\n insert raw\n Args:\n db: db\n process: process\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.insert_raw(db=db, process=process)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def search_many_raws(cls, db: Session, interaction_id: str) -> List[XAgentRaw] | None:\n \"\"\"\n search many raws\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n raw list [XAgentRaw]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return [XAgentRaw.from_db(raw) for raw in \n InteractionDBInterface.search_many_raws(db=db, interaction_id=interaction_id)]\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_raw(cls, db: Session, interaction_id: str, node_id: str) -> XAgentRaw | None:\n \"\"\"\n get raw\n Args:\n db: db\n interaction_id: interaction id\n node_id: node id\n Returns:\n raw XAgentRaw, if not found, return None\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_raw(db=db,\n interaction_id=interaction_id,\n node_id=node_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_next_send(cls, db: Session, interaction_id: str) -> List[Raw] | None:\n \"\"\"\n get next send\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n raw list [Raw]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_next_send(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_send_flag(cls, db: Session, interaction_id: str, node_id: str):\n \"\"\"\n update send flag\n Args:\n db: db\n interaction_id: interaction id\n node_id: node id\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.update_send_flag(\n db=db, interaction_id=interaction_id, node_id=node_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_receive_flag(cls, db: Session, interaction_id: str, node_id: str):\n \"\"\"\n update receive flag\n Args:\n db: db\n interaction_id: interaction id\n node_id: node id\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n 
InteractionDBInterface.update_receive_flag(\n db=db, interaction_id=interaction_id, node_id=node_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_human_data(cls,\n db: Session,\n interaction_id: str,\n node_id: str,\n human_data: dict):\n \"\"\"\n update human data\n Args:\n db: db\n interaction_id: interaction id\n node_id: node id\n human_data: human data\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.update_human_data(db=db,\n interaction_id=interaction_id,\n node_id=node_id,\n human_data=human_data)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def insert_error(cls,\n db: Session,\n interaction_id: str,\n message: str,\n status: str = \"failed\"):\n \"\"\"\n insert error\n Args:\n db: db\n interaction_id: interaction id\n message: message\n status: status, default is failed\n Returns:\n raw XAgentRaw\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n process = XAgentRaw(\n node_id=uuid.uuid4().hex,\n interaction_id=interaction_id,\n current=\"\",\n step=0,\n data=message,\n file_list=[],\n status=status,\n do_interrupt=False,\n wait_seconds=0,\n ask_for_human_help=False,\n create_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n update_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n is_deleted=False,\n is_human=False,\n human_data={},\n human_file_list=[],\n is_send=False,\n is_receive=False,\n include_pictures=False,\n )\n InteractionDBInterface.insert_raw(db=db, process=process)\n return process\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def add_share(cls, db: Session, share):\n \"\"\"\n add share\n Args:\n db: db\n share: share\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.add_share(db=db, shared=share)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n \n \n @classmethod\n def get_finish_status(cls, db: Session, interaction_id: str) -> bool:\n \"\"\"\n get finish status\n \n Args:\n db: db\n interaction_id: interaction id\n \n Returns:\n True if finish, else False\n \"\"\"\n try:\n return InteractionDBInterface.get_finish_status(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e" }, { "identifier": "redis", "path": "XAgentServer/application/global_val.py", "snippet": "def init_yag(logger):\ndef init_executor(logger):" }, { "identifier": "CommandLineInput", "path": "command_input.py", "snippet": "class CommandLineInput:\n \"\"\"\n Class for handling command line input.\n\n This child class extends from BaseInput and implements methods to handle and manage command line input data.\n\n Attributes:\n do_interrupt (bool): If True, input will be interrupted.\n max_wait_seconds (int): Maximum wait time for input in seconds.\n \"\"\"\n def __init__(self,\n do_interrupt: bool = False,\n max_wait_seconds: int = 600,\n logger=None):\n self.do_interrupt = do_interrupt\n self.max_wait_seconds = max_wait_seconds\n self.logger = logger\n\n def run(self, input_data):\n \"\"\"\n Run the command line input method.\n\n Args:\n input_data (Any): The original input data to be processed.\n\n Returns:\n data (Any): The processed input data.\n \"\"\"\n if self.do_interrupt:\n data = self.interrupt(input_data)\n 
else:\n data = input_data\n return data\n \n def get_each_input(self, key, value, res, timeout):\n \"\"\"\n Returns the input from the command line for a single key-value pair.\n\n Args:\n key (str): The key for which to get input.\n value (Any): The current value associated with the key.\n res (dict): The result dictionary where inputs collected will be stored.\n timeout (int): Timeout in seconds for the input.\n\n Returns:\n Any: The input data.\n \"\"\"\n self.logger.typewriter_log(\n f\"Now, ASK For {key}, Origin Input: {value}\",\n Fore.RED,\n f\"\"\n )\n self.logger.typewriter_log(\n f\"Now, you can modify the current field by entering some information, and then press 'Enter' to continue, if you want to keep the original input, please enter '-1' and then press 'Enter':\",\n Fore.GREEN\n )\n temp = inputimeout(prompt=f'You have {timeout} seconds to input:\\n', timeout=timeout)\n if temp == \"-1\":\n return value\n else:\n return temp\n \n def get_input(self, origin_data):\n \"\"\"\n Get input for all fields of the original data from the command line.\n\n Args:\n origin_data (dict): The original data for which to get input.\n\n Returns:\n dict: The dictionary with updated inputs.\n \"\"\"\n self.logger.typewriter_log(\n \"Next, you can start modifying the original input by typing 'Y/y/yes' or skip this step by typing 'N/n/no' and then press 'Enter' to continue the loop:\",\n Fore.RED\n )\n update = inputimeout(prompt=f'You have to make a decision within 60 seconds:\\n', timeout=60)\n res = {\"args\": {}}\n if update in ['y', 'Y', 'yes']:\n execute_time = self.max_wait_seconds\n if isinstance(origin_data, dict):\n args = origin_data.get(\"args\", \"\")\n self.logger.typewriter_log(\n f\"Next, you will have a total of {self.max_wait_seconds} seconds to modify each option:\",\n Fore.RED,\n )\n for key, value in args.items():\n if key == \"done\":\n res[key] = False\n continue\n start_time = time.time()\n res[\"args\"][key] = self.get_each_input(key, value, res, execute_time)\n end_time = time.time()\n execute_time = math.floor(execute_time - (end_time - start_time))\n self.logger.info(f\"modify the input, receive the data: {res}\")\n else:\n res = origin_data\n self.logger.info(\"skip this step\")\n self.logger.info(\"continue the loop\")\n res[\"done\"] = True\n return res\n \n def interrupt(self, input_data):\n \"\"\"\n Interrupts the current input process and returns the current data.\n\n Args:\n input_data (dict): The original input data.\n\n Returns:\n dict: The current data collected so far.\n\n Raises:\n XAgentIOTimeoutError: If the input times out.\n \"\"\"\n try:\n data = self.get_input(input_data)\n return data\n except TimeoutOccurred:\n self.logger.error(f\"Waiting timemout, close connection!\")\n raise XAgentTimeoutError(\"timeout!\")" } ]
import asyncio
import json
import os
import threading
import traceback
import uuid
import sys
from contextlib import contextmanager
from datetime import datetime
from typing import List
from colorama import Fore
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from XAgentServer.application.core.envs import XAgentServerEnv
from XAgentServer.database.connect import SessionLocal
from XAgentServer.enums.status import StatusEnum
from XAgentServer.exts.exception_ext import XAgentError
from XAgentServer.interaction import XAgentInteraction
from XAgentServer.loggers.logs import Logger
from XAgentServer.models.interaction import InteractionBase
from XAgentServer.models.parameter import InteractionParameter
from XAgentServer.models.raw import XAgentRaw
from XAgentServer.server import XAgentServer
from XAgentServer.application.cruds.interaction import InteractionCRUD
from XAgentServer.application.global_val import redis
from command_input import CommandLineInput
from XAgent.running_recorder import recorder
13,516
role="Assistant", plan=[], upload_files: List[str] = [], download_files: List[str] = [], record_dir: str = None, mode: str = "auto", max_wait_seconds: int = 600, description: str = "XAgent-Test", agent: str = "XAgent", ): self.task = task self.plan = plan self.role = role self.upload_files = upload_files self.download_files = download_files self.record_dir = record_dir # auto is supported only in cmd self.mode = "auto" self.max_wait_seconds = max_wait_seconds self.description = description self.agent = agent class CommandLine(): """ A command-line interface for interacting with XAgentServer. Attributes: env: An instance of the XAgentServer environment. client_id: A unique identifier for the client, generated as a hexadecimal UUID. date_str: The current date as a string in YYYY-MM-DD format. log_dir: The directory where the logs are stored. logger: An instance of the Logger used for logging interactions. interactionDB: A database interface for interacting with either a persistent database (SQLite, MySQL, PostgreSQL) or a local storage file, depending on the configuration of `env`. """ def __init__(self, args: CommandLineParam = None): """ Initialize the CommandLine instance. Args: args (CommandLineParam) : parameters. task is required, mode options: ["auto"] """ self.args = args self.client_id = uuid.uuid4().hex self.date_str = datetime.now().strftime("%Y-%m-%d") self.log_dir = os.path.join(os.path.join(XAgentServerEnv.base_dir, "localstorage", "interact_records"), self.date_str, self.client_id) if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.logger = Logger(log_dir=self.log_dir, log_file=f"interact.log") self.logger.typewriter_log( title=f"XAgentServer is running on cmd mode", title_color=Fore.RED) self.logger.info(title=f"XAgentServer log:", title_color=Fore.RED, message=f"{self.log_dir}") self.interrupt = self.args.mode != "auto" self.init_conv_env() self.max_wait_seconds = self.args.max_wait_seconds self.scheduler = AsyncIOScheduler() self.input = None if self.interrupt: self.input = CommandLineInput( do_interrupt=True, max_wait_seconds=self.max_wait_seconds, logger=self.logger) def init_conv_env(self): """initialize the conversation environment, Share the same database resource with webui. If you have initiated a session on the front end but it has not been executed, this ID will be shared. """ user_id = "guest" token = "xagent" description = self.args.description upload_files = self.args.upload_files record_dir = self.args.record_dir agent = self.args.agent goal = self.args.task mode = self.args.mode plan = self.args.plan with get_db() as db: interaction = InteractionCRUD.get_ready_interaction( db=db, user_id=user_id) self.continue_flag = True upload_files = upload_files if upload_files else [] file_list = [] for file in upload_files: file_list.append({ "uuid": file, "name": file }) if interaction is None: base = InteractionBase(interaction_id=self.client_id, user_id=user_id, create_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), description=description, agent=agent, mode=mode, file_list=file_list, recorder_root_dir="", status="ready", message="ready...", current_step="-1", update_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), call_method="cmd") InteractionCRUD.create_interaction(db=db, base=base) else: self.client_id = interaction.interaction_id
@contextmanager def get_db(): """ Provide a transactional scope around a series of operations. """ session = SessionLocal() try: yield session session.commit() except: session.rollback() raise finally: session.close() class CommandLineParam: """Command line parameters. Attributes: task: Task description. role: Role name (default is "Assistant"). plan: List of steps to perform (default is empty list). upload_files: List of files to upload (default is empty list). download_files: List of files to download (default is empty list). record_dir: Directory to store records (default is `None`). mode: Run mode. Can be "auto" (default is "auto"). max_wait_seconds: Maximum wait time in seconds (default is 600). description: Description of the interaction (default is "XAgent-Test"). agent: Agent name (default is "XAgent"). """ def __init__(self, task, role="Assistant", plan=[], upload_files: List[str] = [], download_files: List[str] = [], record_dir: str = None, mode: str = "auto", max_wait_seconds: int = 600, description: str = "XAgent-Test", agent: str = "XAgent", ): self.task = task self.plan = plan self.role = role self.upload_files = upload_files self.download_files = download_files self.record_dir = record_dir # auto is supported only in cmd self.mode = "auto" self.max_wait_seconds = max_wait_seconds self.description = description self.agent = agent class CommandLine(): """ A command-line interface for interacting with XAgentServer. Attributes: env: An instance of the XAgentServer environment. client_id: A unique identifier for the client, generated as a hexadecimal UUID. date_str: The current date as a string in YYYY-MM-DD format. log_dir: The directory where the logs are stored. logger: An instance of the Logger used for logging interactions. interactionDB: A database interface for interacting with either a persistent database (SQLite, MySQL, PostgreSQL) or a local storage file, depending on the configuration of `env`. """ def __init__(self, args: CommandLineParam = None): """ Initialize the CommandLine instance. Args: args (CommandLineParam) : parameters. task is required, mode options: ["auto"] """ self.args = args self.client_id = uuid.uuid4().hex self.date_str = datetime.now().strftime("%Y-%m-%d") self.log_dir = os.path.join(os.path.join(XAgentServerEnv.base_dir, "localstorage", "interact_records"), self.date_str, self.client_id) if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.logger = Logger(log_dir=self.log_dir, log_file=f"interact.log") self.logger.typewriter_log( title=f"XAgentServer is running on cmd mode", title_color=Fore.RED) self.logger.info(title=f"XAgentServer log:", title_color=Fore.RED, message=f"{self.log_dir}") self.interrupt = self.args.mode != "auto" self.init_conv_env() self.max_wait_seconds = self.args.max_wait_seconds self.scheduler = AsyncIOScheduler() self.input = None if self.interrupt: self.input = CommandLineInput( do_interrupt=True, max_wait_seconds=self.max_wait_seconds, logger=self.logger) def init_conv_env(self): """initialize the conversation environment, Share the same database resource with webui. If you have initiated a session on the front end but it has not been executed, this ID will be shared. 
""" user_id = "guest" token = "xagent" description = self.args.description upload_files = self.args.upload_files record_dir = self.args.record_dir agent = self.args.agent goal = self.args.task mode = self.args.mode plan = self.args.plan with get_db() as db: interaction = InteractionCRUD.get_ready_interaction( db=db, user_id=user_id) self.continue_flag = True upload_files = upload_files if upload_files else [] file_list = [] for file in upload_files: file_list.append({ "uuid": file, "name": file }) if interaction is None: base = InteractionBase(interaction_id=self.client_id, user_id=user_id, create_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), description=description, agent=agent, mode=mode, file_list=file_list, recorder_root_dir="", status="ready", message="ready...", current_step="-1", update_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), call_method="cmd") InteractionCRUD.create_interaction(db=db, base=base) else: self.client_id = interaction.interaction_id
parameter = InteractionParameter(
7
2023-10-16 03:44:57+00:00
16k
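Each record in this dump pairs retrieved context snippets (identifier/path/snippet), an import block, and a cropped source file with the gold next_line and the index of the gold context snippet. The sketch below is not part of the dataset: it is one illustrative way such a record could be assembled into a completion prompt and checked against next_line, assuming the record is available as a plain Python dict with the keys shown here; the helper names are hypothetical and the model call is stubbed out.

# Minimal sketch (illustrative only, not part of the dataset): build a prompt from
# one record and exact-match the first predicted line against next_line.
# Assumes the record is a dict with the keys shown in this dump; the "model" is a stub.
from typing import Callable, Dict, List


def build_prompt(record: Dict) -> str:
    """Concatenate retrieved context snippets, the import block, and the cropped file."""
    context_parts: List[str] = [
        f"# Path: {item['path']}\n{item['snippet']}" for item in record.get("context", [])
    ]
    return "\n\n".join(context_parts + [record["import_statement"], record["cropped_code"]])


def score_record(record: Dict, complete_fn: Callable[[str], str]) -> bool:
    """Exact-match check of the model's first predicted line against next_line."""
    prediction = complete_fn(build_prompt(record)).splitlines()[0].strip()
    return prediction == record["next_line"].strip()


if __name__ == "__main__":
    # Dummy record and a trivial "model" that echoes the gold line, purely to
    # show the expected shape of the fields; both are hypothetical.
    dummy = {
        "context": [{"identifier": "f", "path": "a.py", "snippet": "def f():\n    pass"}],
        "import_statement": "import os",
        "cropped_code": "x = 1",
        "next_line": "parameter = InteractionParameter(",
    }
    print(score_record(dummy, lambda prompt: dummy["next_line"]))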
deepseek-ai/DreamCraft3D
threestudio/systems/base.py
[ { "identifier": "Exporter", "path": "threestudio/models/exporters/base.py", "snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n material: BaseMaterial,\n background: BaseBackground,\n ) -> None:\n @dataclass\n class SubModules:\n geometry: BaseImplicitGeometry\n material: BaseMaterial\n background: BaseBackground\n\n self.sub_modules = SubModules(geometry, material, background)\n\n @property\n def geometry(self) -> BaseImplicitGeometry:\n return self.sub_modules.geometry\n\n @property\n def material(self) -> BaseMaterial:\n return self.sub_modules.material\n\n @property\n def background(self) -> BaseBackground:\n return self.sub_modules.background\n\n def __call__(self, *args, **kwargs) -> List[ExporterOutput]:\n raise NotImplementedError" }, { "identifier": "ExporterOutput", "path": "threestudio/models/exporters/base.py", "snippet": "class ExporterOutput:\n save_name: str\n save_type: str\n params: Dict[str, Any]" }, { "identifier": "parse_optimizer", "path": "threestudio/systems/utils.py", "snippet": "def parse_optimizer(config, model):\n if hasattr(config, \"params\"):\n params = [\n {\"params\": get_parameters(model, name), \"name\": name, **args}\n for name, args in config.params.items()\n ]\n threestudio.debug(f\"Specify optimizer params: {config.params}\")\n else:\n params = model.parameters()\n if config.name in [\"FusedAdam\"]:\n import apex\n\n optim = getattr(apex.optimizers, config.name)(params, **config.args)\n elif config.name in [\"Adan\"]:\n from threestudio.systems import optimizers\n\n optim = getattr(optimizers, config.name)(params, **config.args)\n else:\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim" }, { "identifier": "parse_scheduler", "path": "threestudio/systems/utils.py", "snippet": "def parse_scheduler(config, optimizer):\n interval = config.get(\"interval\", \"epoch\")\n assert interval in [\"epoch\", \"step\"]\n if config.name == \"SequentialLR\":\n scheduler = {\n \"scheduler\": lr_scheduler.SequentialLR(\n optimizer,\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ],\n milestones=config.milestones,\n ),\n \"interval\": interval,\n }\n elif config.name == \"ChainedScheduler\":\n scheduler = {\n \"scheduler\": lr_scheduler.ChainedScheduler(\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ]\n ),\n \"interval\": interval,\n }\n else:\n scheduler = {\n \"scheduler\": get_scheduler(config.name)(optimizer, **config.args),\n \"interval\": interval,\n }\n return scheduler" }, { "identifier": "Updateable", "path": "threestudio/utils/base.py", "snippet": "class Updateable:\n def do_update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using getattr?\n if isinstance(module, Updateable):\n module.do_update_step(\n epoch, global_step, on_load_weights=on_load_weights\n )\n self.update_step(epoch, global_step, on_load_weights=on_load_weights)\n\n def do_update_step_end(self, epoch: int, global_step: int):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using 
getattr?\n if isinstance(module, Updateable):\n module.do_update_step_end(epoch, global_step)\n self.update_step_end(epoch, global_step)\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n # override this method to implement custom update logic\n # if on_load_weights is True, you should be careful doing things related to model evaluations,\n # as the models and tensors are not guarenteed to be on the same device\n pass\n\n def update_step_end(self, epoch: int, global_step: int):\n pass" }, { "identifier": "update_end_if_possible", "path": "threestudio/utils/base.py", "snippet": "def update_end_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step_end(epoch, global_step)" }, { "identifier": "update_if_possible", "path": "threestudio/utils/base.py", "snippet": "def update_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step(epoch, global_step)" }, { "identifier": "parse_structured", "path": "threestudio/utils/config.py", "snippet": "def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]] = None) -> Any:\n scfg = OmegaConf.structured(fields(**cfg))\n return scfg" }, { "identifier": "C", "path": "threestudio/utils/misc.py", "snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n if len(value) >= 6:\n select_i = 3\n for i in range(3, len(value) - 2, 2):\n if global_step >= value[i]:\n select_i = i + 2\n if select_i != 3:\n start_value, start_step = value[select_i - 3], value[select_i - 2]\n else:\n start_step, start_value = value[:2]\n end_value, end_step = value[select_i - 1], value[select_i]\n value = [start_step, start_value, end_value, end_step]\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value" }, { "identifier": "cleanup", "path": "threestudio/utils/misc.py", "snippet": "def cleanup():\n gc.collect()\n torch.cuda.empty_cache()\n tcnn.free_temporary_memory()" }, { "identifier": "get_device", "path": "threestudio/utils/misc.py", "snippet": "def get_device():\n return torch.device(f\"cuda:{get_rank()}\")" }, { "identifier": "load_module_weights", "path": "threestudio/utils/misc.py", "snippet": "def load_module_weights(\n path, module_name=None, ignore_modules=None, map_location=None\n) -> Tuple[dict, int, int]:\n if module_name is not None and ignore_modules is not None:\n raise ValueError(\"module_name and ignore_modules cannot be both set\")\n if map_location is None:\n map_location = get_device()\n\n ckpt = torch.load(path, map_location=map_location)\n state_dict = ckpt[\"state_dict\"]\n state_dict_to_load = state_dict\n\n if ignore_modules is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n ignore = any(\n [k.startswith(ignore_module + \".\") for ignore_module in 
ignore_modules]\n )\n if ignore:\n continue\n state_dict_to_load[k] = v\n\n if module_name is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n m = re.match(rf\"^{module_name}\\.(.*)$\", k)\n if m is None:\n continue\n state_dict_to_load[m.group(1)] = v\n\n return state_dict_to_load, ckpt[\"epoch\"], ckpt[\"global_step\"]" }, { "identifier": "find_last_path", "path": "threestudio/utils/misc.py", "snippet": "def find_last_path(path: str):\n if (path is not None) and (\"LAST\" in path):\n path = path.replace(\" \", \"_\")\n base_dir_prefix, suffix = path.split(\"LAST\", 1)\n base_dir = os.path.dirname(base_dir_prefix)\n prefix = os.path.split(base_dir_prefix)[-1]\n base_dir_prefix = os.path.join(base_dir, prefix)\n all_path = os.listdir(base_dir)\n all_path = [os.path.join(base_dir, dir) for dir in all_path]\n filtered_path = [dir for dir in all_path if dir.startswith(base_dir_prefix)]\n filtered_path.sort(reverse=True)\n last_path = filtered_path[0]\n new_path = last_path + suffix\n if os.path.exists(new_path):\n return new_path\n else:\n raise FileNotFoundError(new_path)\n else:\n return path" }, { "identifier": "SaverMixin", "path": "threestudio/utils/saving.py", "snippet": "class SaverMixin:\n _save_dir: Optional[str] = None\n _wandb_logger: Optional[WandbLogger] = None\n\n def set_save_dir(self, save_dir: str):\n self._save_dir = save_dir\n\n def get_save_dir(self):\n if self._save_dir is None:\n raise ValueError(\"Save dir is not set\")\n return self._save_dir\n\n def convert_data(self, data):\n if data is None:\n return None\n elif isinstance(data, np.ndarray):\n return data\n elif isinstance(data, torch.Tensor):\n return data.detach().cpu().numpy()\n elif isinstance(data, list):\n return [self.convert_data(d) for d in data]\n elif isinstance(data, dict):\n return {k: self.convert_data(v) for k, v in data.items()}\n else:\n raise TypeError(\n \"Data must be in type numpy.ndarray, torch.Tensor, list or dict, getting\",\n type(data),\n )\n\n def get_save_path(self, filename):\n save_path = os.path.join(self.get_save_dir(), filename)\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n return save_path\n\n def create_loggers(self, cfg_loggers: DictConfig) -> None:\n if \"wandb\" in cfg_loggers.keys() and cfg_loggers.wandb.enable:\n self._wandb_logger = WandbLogger(\n project=cfg_loggers.wandb.project, name=cfg_loggers.wandb.name\n )\n\n def get_loggers(self) -> List:\n if self._wandb_logger:\n return [self._wandb_logger]\n else:\n return []\n\n DEFAULT_RGB_KWARGS = {\"data_format\": \"HWC\", \"data_range\": (0, 1)}\n DEFAULT_UV_KWARGS = {\n \"data_format\": \"HWC\",\n \"data_range\": (0, 1),\n \"cmap\": \"checkerboard\",\n }\n DEFAULT_GRAYSCALE_KWARGS = {\"data_range\": None, \"cmap\": \"jet\"}\n DEFAULT_GRID_KWARGS = {\"align\": \"max\"}\n\n def get_rgb_image_(self, img, data_format, data_range, rgba=False):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n if img.dtype != np.uint8:\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (\n (img - data_range[0]) / (data_range[1] - data_range[0]) * 255.0\n ).astype(np.uint8)\n nc = 4 if rgba else 3\n imgs = [img[..., start : start + nc] for start in range(0, img.shape[-1], nc)]\n imgs = [\n img_\n if img_.shape[-1] == nc\n else np.concatenate(\n [\n img_,\n np.zeros(\n (img_.shape[0], img_.shape[1], nc - img_.shape[2]),\n dtype=img_.dtype,\n ),\n ],\n axis=-1,\n )\n for img_ in imgs\n ]\n img = np.concatenate(imgs, 
axis=1)\n if rgba:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n else:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_rgb_image(\n self,\n filename,\n img,\n data_format,\n data_range,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_rgb_image_(img, data_format, data_range)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_rgb_image(\n self,\n filename,\n img,\n data_format=DEFAULT_RGB_KWARGS[\"data_format\"],\n data_range=DEFAULT_RGB_KWARGS[\"data_range\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_rgb_image(save_path, img, data_format, data_range, name, step)\n return save_path\n\n def get_uv_image_(self, img, data_format, data_range, cmap):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [\"checkerboard\", \"color\"]\n if cmap == \"checkerboard\":\n n_grid = 64\n mask = (img * n_grid).astype(int)\n mask = (mask[..., 0] + mask[..., 1]) % 2 == 0\n img = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 255\n img[mask] = np.array([255, 0, 255], dtype=np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif cmap == \"color\":\n img_ = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n img_[..., 0] = (img[..., 0] * 255).astype(np.uint8)\n img_[..., 1] = (img[..., 1] * 255).astype(np.uint8)\n img_ = cv2.cvtColor(img_, cv2.COLOR_RGB2BGR)\n img = img_\n return img\n\n def save_uv_image(\n self,\n filename,\n img,\n data_format=DEFAULT_UV_KWARGS[\"data_format\"],\n data_range=DEFAULT_UV_KWARGS[\"data_range\"],\n cmap=DEFAULT_UV_KWARGS[\"cmap\"],\n ) -> str:\n save_path = self.get_save_path(filename)\n img = self.get_uv_image_(img, data_format, data_range, cmap)\n cv2.imwrite(save_path, img)\n return save_path\n\n def get_grayscale_image_(self, img, data_range, cmap):\n img = self.convert_data(img)\n img = np.nan_to_num(img)\n if data_range is None:\n img = (img - img.min()) / (img.max() - img.min())\n else:\n img = img.clip(data_range[0], data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [None, \"jet\", \"magma\", \"spectral\"]\n if cmap == None:\n img = (img * 255.0).astype(np.uint8)\n img = np.repeat(img[..., None], 3, axis=2)\n elif cmap == \"jet\":\n img = (img * 255.0).astype(np.uint8)\n img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n elif cmap == \"magma\":\n img = 1.0 - img\n base = cm.get_cmap(\"magma\")\n num_bins = 256\n colormap = LinearSegmentedColormap.from_list(\n f\"{base.name}{num_bins}\", base(np.linspace(0, 1, num_bins)), num_bins\n )(np.linspace(0, 1, num_bins))[:, :3]\n a = np.floor(img * 255.0)\n b = (a + 1).clip(max=255.0)\n f = img * 255.0 - a\n a = a.astype(np.uint16).clip(0, 255)\n b = b.astype(np.uint16).clip(0, 255)\n img = colormap[a] + (colormap[b] - colormap[a]) * f[..., None]\n img = (img * 255.0).astype(np.uint8)\n elif cmap == \"spectral\":\n colormap = plt.get_cmap(\"Spectral\")\n\n def blend_rgba(image):\n image = image[..., :3] * image[..., -1:] + (\n 1.0 - image[..., -1:]\n ) # blend A to RGB\n return image\n\n img = colormap(img)\n img = blend_rgba(img)\n img = (img * 255).astype(np.uint8)\n img 
= cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_grayscale_image(\n self,\n filename,\n img,\n data_range,\n cmap,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_grayscale_image_(img, data_range, cmap)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_grayscale_image(\n self,\n filename,\n img,\n data_range=DEFAULT_GRAYSCALE_KWARGS[\"data_range\"],\n cmap=DEFAULT_GRAYSCALE_KWARGS[\"cmap\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_grayscale_image(save_path, img, data_range, cmap, name, step)\n return save_path\n\n def get_image_grid_(self, imgs, align):\n if isinstance(imgs[0], list):\n return np.concatenate(\n [self.get_image_grid_(row, align) for row in imgs], axis=0\n )\n cols = []\n for col in imgs:\n assert col[\"type\"] in [\"rgb\", \"uv\", \"grayscale\"]\n if col[\"type\"] == \"rgb\":\n rgb_kwargs = self.DEFAULT_RGB_KWARGS.copy()\n rgb_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_rgb_image_(col[\"img\"], **rgb_kwargs))\n elif col[\"type\"] == \"uv\":\n uv_kwargs = self.DEFAULT_UV_KWARGS.copy()\n uv_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_uv_image_(col[\"img\"], **uv_kwargs))\n elif col[\"type\"] == \"grayscale\":\n grayscale_kwargs = self.DEFAULT_GRAYSCALE_KWARGS.copy()\n grayscale_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_grayscale_image_(col[\"img\"], **grayscale_kwargs))\n\n if align == \"max\":\n h = max([col.shape[0] for col in cols])\n w = max([col.shape[1] for col in cols])\n elif align == \"min\":\n h = min([col.shape[0] for col in cols])\n w = min([col.shape[1] for col in cols])\n elif isinstance(align, int):\n h = align\n w = align\n elif (\n isinstance(align, tuple)\n and isinstance(align[0], int)\n and isinstance(align[1], int)\n ):\n h, w = align\n else:\n raise ValueError(\n f\"Unsupported image grid align: {align}, should be min, max, int or (int, int)\"\n )\n\n for i in range(len(cols)):\n if cols[i].shape[0] != h or cols[i].shape[1] != w:\n cols[i] = cv2.resize(cols[i], (w, h), interpolation=cv2.INTER_LINEAR)\n return np.concatenate(cols, axis=1)\n\n def save_image_grid(\n self,\n filename,\n imgs,\n align=DEFAULT_GRID_KWARGS[\"align\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n texts: Optional[List[float]] = None,\n ):\n save_path = self.get_save_path(filename)\n img = self.get_image_grid_(imgs, align=align)\n\n if texts is not None:\n img = Image.fromarray(img)\n draw = ImageDraw.Draw(img)\n black, white = (0, 0, 0), (255, 255, 255)\n for i, text in enumerate(texts):\n draw.text((2, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((2, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((1, (img.size[1] // len(texts)) * i), f\"{text}\", black)\n img = np.asarray(img)\n\n cv2.imwrite(save_path, img)\n if name and self._wandb_logger:\n wandb.log({name: wandb.Image(save_path), \"trainer/global_step\": step})\n return save_path\n\n def save_image(self, filename, img) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.dtype == np.uint8 or img.dtype == np.uint16\n if img.ndim == 3 and img.shape[-1] == 3:\n img = 
cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif img.ndim == 3 and img.shape[-1] == 4:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n cv2.imwrite(save_path, img)\n return save_path\n\n def save_cubemap(self, filename, img, data_range=(0, 1), rgba=False) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.ndim == 4 and img.shape[0] == 6 and img.shape[1] == img.shape[2]\n\n imgs_full = []\n for start in range(0, img.shape[-1], 3):\n img_ = img[..., start : start + 3]\n img_ = np.stack(\n [\n self.get_rgb_image_(img_[i], \"HWC\", data_range, rgba=rgba)\n for i in range(img_.shape[0])\n ],\n axis=0,\n )\n size = img_.shape[1]\n placeholder = np.zeros((size, size, 3), dtype=np.float32)\n img_full = np.concatenate(\n [\n np.concatenate(\n [placeholder, img_[2], placeholder, placeholder], axis=1\n ),\n np.concatenate([img_[1], img_[4], img_[0], img_[5]], axis=1),\n np.concatenate(\n [placeholder, img_[3], placeholder, placeholder], axis=1\n ),\n ],\n axis=0,\n )\n imgs_full.append(img_full)\n\n imgs_full = np.concatenate(imgs_full, axis=1)\n cv2.imwrite(save_path, imgs_full)\n return save_path\n\n def save_data(self, filename, data) -> str:\n data = self.convert_data(data)\n if isinstance(data, dict):\n if not filename.endswith(\".npz\"):\n filename += \".npz\"\n save_path = self.get_save_path(filename)\n np.savez(save_path, **data)\n else:\n if not filename.endswith(\".npy\"):\n filename += \".npy\"\n save_path = self.get_save_path(filename)\n np.save(save_path, data)\n return save_path\n\n def save_state_dict(self, filename, data) -> str:\n save_path = self.get_save_path(filename)\n torch.save(data, save_path)\n return save_path\n\n def save_img_sequence(\n self,\n filename,\n img_dir,\n matcher,\n save_format=\"mp4\",\n fps=30,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n assert save_format in [\"gif\", \"mp4\"]\n if not filename.endswith(save_format):\n filename += f\".{save_format}\"\n save_path = self.get_save_path(filename)\n matcher = re.compile(matcher)\n img_dir = os.path.join(self.get_save_dir(), img_dir)\n imgs = []\n for f in os.listdir(img_dir):\n if matcher.search(f):\n imgs.append(f)\n imgs = sorted(imgs, key=lambda f: int(matcher.search(f).groups()[0]))\n imgs = [cv2.imread(os.path.join(img_dir, f)) for f in imgs]\n\n if save_format == \"gif\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps, palettesize=256)\n elif save_format == \"mp4\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Video(save_path, format=\"mp4\"),\n \"trainer/global_step\": step,\n }\n )\n return save_path\n\n def save_mesh(self, filename, v_pos, t_pos_idx, v_tex=None, t_tex_idx=None) -> str:\n save_path = self.get_save_path(filename)\n v_pos = self.convert_data(v_pos)\n t_pos_idx = self.convert_data(t_pos_idx)\n mesh = trimesh.Trimesh(vertices=v_pos, faces=t_pos_idx)\n mesh.export(save_path)\n return save_path\n\n def save_obj(\n self,\n filename: str,\n mesh: Mesh,\n save_mat: bool = False,\n save_normal: bool = False,\n save_uv: bool = False,\n save_vertex_color: bool = False,\n map_Kd: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Ks: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Bump: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Pm: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_Pr: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_format: str = 
\"jpg\",\n ) -> List[str]:\n save_paths: List[str] = []\n if not filename.endswith(\".obj\"):\n filename += \".obj\"\n v_pos, t_pos_idx = self.convert_data(mesh.v_pos), self.convert_data(\n mesh.t_pos_idx\n )\n v_nrm, v_tex, t_tex_idx, v_rgb = None, None, None, None\n if save_normal:\n v_nrm = self.convert_data(mesh.v_nrm)\n if save_uv:\n v_tex, t_tex_idx = self.convert_data(mesh.v_tex), self.convert_data(\n mesh.t_tex_idx\n )\n if save_vertex_color:\n v_rgb = self.convert_data(mesh.v_rgb)\n matname, mtllib = None, None\n if save_mat:\n matname = \"default\"\n mtl_filename = filename.replace(\".obj\", \".mtl\")\n mtllib = os.path.basename(mtl_filename)\n mtl_save_paths = self._save_mtl(\n mtl_filename,\n matname,\n map_Kd=self.convert_data(map_Kd),\n map_Ks=self.convert_data(map_Ks),\n map_Bump=self.convert_data(map_Bump),\n map_Pm=self.convert_data(map_Pm),\n map_Pr=self.convert_data(map_Pr),\n map_format=map_format,\n )\n save_paths += mtl_save_paths\n obj_save_path = self._save_obj(\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=v_nrm,\n v_tex=v_tex,\n t_tex_idx=t_tex_idx,\n v_rgb=v_rgb,\n matname=matname,\n mtllib=mtllib,\n )\n save_paths.append(obj_save_path)\n return save_paths\n\n def _save_obj(\n self,\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=None,\n v_tex=None,\n t_tex_idx=None,\n v_rgb=None,\n matname=None,\n mtllib=None,\n ) -> str:\n obj_str = \"\"\n if matname is not None:\n obj_str += f\"mtllib {mtllib}\\n\"\n obj_str += f\"g object\\n\"\n obj_str += f\"usemtl {matname}\\n\"\n for i in range(len(v_pos)):\n obj_str += f\"v {v_pos[i][0]} {v_pos[i][1]} {v_pos[i][2]}\"\n if v_rgb is not None:\n obj_str += f\" {v_rgb[i][0]} {v_rgb[i][1]} {v_rgb[i][2]}\"\n obj_str += \"\\n\"\n if v_nrm is not None:\n for v in v_nrm:\n obj_str += f\"vn {v[0]} {v[1]} {v[2]}\\n\"\n if v_tex is not None:\n for v in v_tex:\n obj_str += f\"vt {v[0]} {1.0 - v[1]}\\n\"\n\n for i in range(len(t_pos_idx)):\n obj_str += \"f\"\n for j in range(3):\n obj_str += f\" {t_pos_idx[i][j] + 1}/\"\n if v_tex is not None:\n obj_str += f\"{t_tex_idx[i][j] + 1}\"\n obj_str += \"/\"\n if v_nrm is not None:\n obj_str += f\"{t_pos_idx[i][j] + 1}\"\n obj_str += \"\\n\"\n\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(obj_str)\n return save_path\n\n def _save_mtl(\n self,\n filename,\n matname,\n Ka=(0.0, 0.0, 0.0),\n Kd=(1.0, 1.0, 1.0),\n Ks=(0.0, 0.0, 0.0),\n map_Kd=None,\n map_Ks=None,\n map_Bump=None,\n map_Pm=None,\n map_Pr=None,\n map_format=\"jpg\",\n step: Optional[int] = None,\n ) -> List[str]:\n mtl_save_path = self.get_save_path(filename)\n save_paths = [mtl_save_path]\n mtl_str = f\"newmtl {matname}\\n\"\n mtl_str += f\"Ka {Ka[0]} {Ka[1]} {Ka[2]}\\n\"\n if map_Kd is not None:\n map_Kd_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_kd.{map_format}\"\n )\n mtl_str += f\"map_Kd texture_kd.{map_format}\\n\"\n self._save_rgb_image(\n map_Kd_save_path,\n map_Kd,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Kd\",\n step=step,\n )\n save_paths.append(map_Kd_save_path)\n else:\n mtl_str += f\"Kd {Kd[0]} {Kd[1]} {Kd[2]}\\n\"\n if map_Ks is not None:\n map_Ks_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_ks.{map_format}\"\n )\n mtl_str += f\"map_Ks texture_ks.{map_format}\\n\"\n self._save_rgb_image(\n map_Ks_save_path,\n map_Ks,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Ks\",\n step=step,\n )\n save_paths.append(map_Ks_save_path)\n else:\n mtl_str += f\"Ks {Ks[0]} {Ks[1]} {Ks[2]}\\n\"\n if map_Bump 
is not None:\n map_Bump_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_nrm.{map_format}\"\n )\n mtl_str += f\"map_Bump texture_nrm.{map_format}\\n\"\n self._save_rgb_image(\n map_Bump_save_path,\n map_Bump,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Bump\",\n step=step,\n )\n save_paths.append(map_Bump_save_path)\n if map_Pm is not None:\n map_Pm_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_metallic.{map_format}\"\n )\n mtl_str += f\"map_Pm texture_metallic.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pm_save_path,\n map_Pm,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_refl\",\n step=step,\n )\n save_paths.append(map_Pm_save_path)\n if map_Pr is not None:\n map_Pr_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_roughness.{map_format}\"\n )\n mtl_str += f\"map_Pr texture_roughness.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pr_save_path,\n map_Pr,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_Ns\",\n step=step,\n )\n save_paths.append(map_Pr_save_path)\n with open(self.get_save_path(filename), \"w\") as f:\n f.write(mtl_str)\n return save_paths\n\n def save_file(self, filename, src_path) -> str:\n save_path = self.get_save_path(filename)\n shutil.copyfile(src_path, save_path)\n return save_path\n\n def save_json(self, filename, payload) -> str:\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(json.dumps(payload))\n return save_path" } ]
import os
import pytorch_lightning as pl
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.exporters.base import Exporter, ExporterOutput
from threestudio.systems.utils import parse_optimizer, parse_scheduler
from threestudio.utils.base import (
    Updateable,
    update_end_if_possible,
    update_if_possible,
)
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import C, cleanup, get_device, load_module_weights, find_last_path
from threestudio.utils.saving import SaverMixin
from threestudio.utils.typing import *
from threestudio.utils.config import load_config, parse_structured
10,840
self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_test_step: # cleanup to save vram cleanup() def on_predict_epoch_end(self): pass def preprocess_data(self, batch, stage): pass """ Implementing on_after_batch_transfer of DataModule does the same. But on_after_batch_transfer does not support DP. """ def on_train_batch_start(self, batch, batch_idx, unused=0): self.preprocess_data(batch, "train") self.dataset = self.trainer.train_dataloader.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_validation_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "validation") self.dataset = self.trainer.val_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_test_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "test") self.dataset = self.trainer.test_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "predict") self.dataset = self.trainer.predict_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False): pass def on_before_optimizer_step(self, optimizer): """ # some gradient-related debugging goes here, example: from lightning.pytorch.utilities import grad_norm norms = grad_norm(self.geometry, norm_type=2) print(norms) """ pass class BaseLift3DSystem(BaseSystem): @dataclass class Config(BaseSystem.Config): geometry_type: str = "" geometry: dict = field(default_factory=dict) geometry_convert_from: Optional[str] = None geometry_convert_inherit_texture: bool = False # used to override configurations of the previous geometry being converted from, # for example isosurface_threshold geometry_convert_override: dict = field(default_factory=dict) material_type: str = "" material: dict = field(default_factory=dict) background_type: str = "" background: dict = field(default_factory=dict) renderer_type: str = "" renderer: dict = field(default_factory=dict) guidance_type: str = "" guidance: dict = field(default_factory=dict) prompt_processor_type: str = "" prompt_processor: dict = field(default_factory=dict) # geometry export configurations, no need to specify in training exporter_type: str = "mesh-exporter" exporter: dict = field(default_factory=dict) cfg: Config def configure(self) -> None: self.cfg.geometry_convert_from = find_last_path(self.cfg.geometry_convert_from) self.cfg.weights = find_last_path(self.cfg.weights) if ( self.cfg.geometry_convert_from # from_coarse must be specified and not self.cfg.weights # not initialized from coarse when weights are specified and not self.resumed # not initialized from coarse when resumed from checkpoints ): threestudio.info("Initializing geometry from a given checkpoint ...") prev_cfg = load_config( os.path.join( os.path.dirname(self.cfg.geometry_convert_from), "../configs/parsed.yaml", ) ) # TODO: hard-coded relative path prev_system_cfg: BaseLift3DSystem.Config 
= parse_structured( self.Config, prev_cfg.system ) prev_geometry_cfg = prev_system_cfg.geometry prev_geometry_cfg.update(self.cfg.geometry_convert_override) prev_geometry = threestudio.find(prev_system_cfg.geometry_type)( prev_geometry_cfg ) state_dict, epoch, global_step = load_module_weights( self.cfg.geometry_convert_from, module_name="geometry", map_location="cpu", ) prev_geometry.load_state_dict(state_dict, strict=False) # restore step-dependent states prev_geometry.do_update_step(epoch, global_step, on_load_weights=True) # convert from coarse stage geometry
class BaseSystem(pl.LightningModule, Updateable, SaverMixin): @dataclass class Config: loggers: dict = field(default_factory=dict) loss: dict = field(default_factory=dict) optimizer: dict = field(default_factory=dict) scheduler: Optional[dict] = None weights: Optional[str] = None weights_ignore_modules: Optional[List[str]] = None cleanup_after_validation_step: bool = False cleanup_after_test_step: bool = False cfg: Config def __init__(self, cfg, resumed=False) -> None: super().__init__() self.cfg = parse_structured(self.Config, cfg) self._save_dir: Optional[str] = None self._resumed: bool = resumed self._resumed_eval: bool = False self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0} if "loggers" in cfg: self.create_loggers(cfg.loggers) self.configure() if self.cfg.weights is not None: self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules) self.post_configure() def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None): state_dict, epoch, global_step = load_module_weights( weights, ignore_modules=ignore_modules, map_location="cpu" ) self.load_state_dict(state_dict, strict=False) # restore step-dependent states self.do_update_step(epoch, global_step, on_load_weights=True) def set_resume_status(self, current_epoch: int, global_step: int): # restore correct epoch and global step in eval self._resumed_eval = True self._resumed_eval_status["current_epoch"] = current_epoch self._resumed_eval_status["global_step"] = global_step @property def resumed(self): # whether from resumed checkpoint return self._resumed @property def true_global_step(self): if self._resumed_eval: return self._resumed_eval_status["global_step"] else: return self.global_step @property def true_current_epoch(self): if self._resumed_eval: return self._resumed_eval_status["current_epoch"] else: return self.current_epoch def configure(self) -> None: pass def post_configure(self) -> None: """ executed after weights are loaded """ pass def C(self, value: Any) -> float: return C(value, self.true_current_epoch, self.true_global_step) def configure_optimizers(self): optim = parse_optimizer(self.cfg.optimizer, self) ret = { "optimizer": optim, } if self.cfg.scheduler is not None: ret.update( { "lr_scheduler": parse_scheduler(self.cfg.scheduler, optim), } ) return ret def training_step(self, batch, batch_idx): raise NotImplementedError def validation_step(self, batch, batch_idx): raise NotImplementedError def on_train_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.train_dataloader.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) def on_validation_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.val_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_validation_step: # cleanup to save vram cleanup() def on_validation_epoch_end(self): raise NotImplementedError def test_step(self, batch, batch_idx): raise NotImplementedError def on_test_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.test_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_test_step: # cleanup to save vram cleanup() def 
on_test_epoch_end(self): pass def predict_step(self, batch, batch_idx): raise NotImplementedError def on_predict_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.predict_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_test_step: # cleanup to save vram cleanup() def on_predict_epoch_end(self): pass def preprocess_data(self, batch, stage): pass """ Implementing on_after_batch_transfer of DataModule does the same. But on_after_batch_transfer does not support DP. """ def on_train_batch_start(self, batch, batch_idx, unused=0): self.preprocess_data(batch, "train") self.dataset = self.trainer.train_dataloader.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_validation_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "validation") self.dataset = self.trainer.val_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_test_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "test") self.dataset = self.trainer.test_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "predict") self.dataset = self.trainer.predict_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False): pass def on_before_optimizer_step(self, optimizer): """ # some gradient-related debugging goes here, example: from lightning.pytorch.utilities import grad_norm norms = grad_norm(self.geometry, norm_type=2) print(norms) """ pass class BaseLift3DSystem(BaseSystem): @dataclass class Config(BaseSystem.Config): geometry_type: str = "" geometry: dict = field(default_factory=dict) geometry_convert_from: Optional[str] = None geometry_convert_inherit_texture: bool = False # used to override configurations of the previous geometry being converted from, # for example isosurface_threshold geometry_convert_override: dict = field(default_factory=dict) material_type: str = "" material: dict = field(default_factory=dict) background_type: str = "" background: dict = field(default_factory=dict) renderer_type: str = "" renderer: dict = field(default_factory=dict) guidance_type: str = "" guidance: dict = field(default_factory=dict) prompt_processor_type: str = "" prompt_processor: dict = field(default_factory=dict) # geometry export configurations, no need to specify in training exporter_type: str = "mesh-exporter" exporter: dict = field(default_factory=dict) cfg: Config def configure(self) -> None: self.cfg.geometry_convert_from = find_last_path(self.cfg.geometry_convert_from) self.cfg.weights = find_last_path(self.cfg.weights) if ( self.cfg.geometry_convert_from # from_coarse must be specified and not self.cfg.weights # not initialized from coarse when weights are specified and not self.resumed # not initialized from coarse when resumed from checkpoints ): 
threestudio.info("Initializing geometry from a given checkpoint ...") prev_cfg = load_config( os.path.join( os.path.dirname(self.cfg.geometry_convert_from), "../configs/parsed.yaml", ) ) # TODO: hard-coded relative path prev_system_cfg: BaseLift3DSystem.Config = parse_structured( self.Config, prev_cfg.system ) prev_geometry_cfg = prev_system_cfg.geometry prev_geometry_cfg.update(self.cfg.geometry_convert_override) prev_geometry = threestudio.find(prev_system_cfg.geometry_type)( prev_geometry_cfg ) state_dict, epoch, global_step = load_module_weights( self.cfg.geometry_convert_from, module_name="geometry", map_location="cpu", ) prev_geometry.load_state_dict(state_dict, strict=False) # restore step-dependent states prev_geometry.do_update_step(epoch, global_step, on_load_weights=True) # convert from coarse stage geometry
prev_geometry = prev_geometry.to(get_device())
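A recurring pattern in the `BaseSystem` / `BaseLift3DSystem` code above is loading checkpoint weights with `strict=False` and then replaying `do_update_step(epoch, global_step, on_load_weights=True)`, so that state derived from the training step (schedules, annealed thresholds) is rebuilt rather than read back from the checkpoint. A minimal standalone sketch of that idea; `ToyModule`, `load_with_step_state`, and the checkpoint keys are hypothetical placeholders, not threestudio's actual helpers:

import torch
from torch import nn


class ToyModule(nn.Module):
    """A module with state that is a function of the training step (illustrative only)."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 4)
        self.noise_std = 1.0  # derived from the step, deliberately not part of the state dict

    def update_step(self, epoch: int, global_step: int):
        # Rebuild anything that depends on the step rather than on learned weights.
        self.noise_std = max(0.0, 1.0 - global_step / 10_000)


def load_with_step_state(module: ToyModule, ckpt_path: str) -> ToyModule:
    ckpt = torch.load(ckpt_path, map_location="cpu")
    module.load_state_dict(ckpt["state_dict"], strict=False)
    # Replay the step hook so schedule-dependent state matches the checkpointed step.
    module.update_step(ckpt.get("epoch", 0), ckpt.get("global_step", 0))
    return module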
10
2023-10-23 07:40:20+00:00
16k
zju3dv/4K4D
easyvolcap/utils/gl_utils.py
[ { "identifier": "dotdict", "path": "easyvolcap/utils/base_utils.py", "snippet": "class dotdict(dict, Dict[KT, VT]):\n \"\"\"\n This is the default data passing object used throughout the codebase\n Main function: dot access for dict values & dict like merging and updates\n\n a dictionary that supports dot notation \n as well as dictionary access notation \n usage: d = make_dotdict() or d = make_dotdict{'val1':'first'})\n set attributes: d.val2 = 'second' or d['val2'] = 'second'\n get attributes: d.val2 or d['val2']\n \"\"\"\n\n def update(self, dct: Dict = None, **kwargs):\n dct = copy(dct) # avoid modifying the original dict, use super's copy to avoid recursion\n\n # Handle different arguments\n if dct is None:\n dct = kwargs\n elif isinstance(dct, Mapping):\n dct.update(kwargs)\n else:\n super().update(dct, **kwargs)\n return\n\n # Recursive updates\n for k, v in dct.items():\n if k in self:\n\n # Handle type conversions\n target_type = type(self[k])\n if not isinstance(v, target_type):\n # NOTE: bool('False') will be True\n if target_type == bool and isinstance(v, str):\n dct[k] = v == 'True'\n else:\n dct[k] = target_type(v)\n\n if isinstance(v, dict):\n self[k].update(v) # recursion from here\n else:\n self[k] = v\n else:\n if isinstance(v, dict):\n self[k] = dotdict(v) # recursion?\n else:\n self[k] = v\n return self\n\n def __init__(self, *args, **kwargs):\n self.update(*args, **kwargs)\n\n copy = return_dotdict(dict.copy)\n fromkeys = return_dotdict(dict.fromkeys)\n\n # def __hash__(self):\n # # return hash(''.join([str(self.values().__hash__())]))\n # return super(dotdict, self).__hash__()\n\n # def __init__(self, *args, **kwargs):\n # super(dotdict, self).__init__(*args, **kwargs)\n\n \"\"\"\n Uncomment following lines and \n comment out __getattr__ = dict.__getitem__ to get feature:\n \n returns empty numpy array for undefined keys, so that you can easily copy things around\n TODO: potential caveat, harder to trace where this is set to np.array([], dtype=np.float32)\n \"\"\"\n\n def __getitem__(self, key):\n try:\n return dict.__getitem__(self, key)\n except KeyError as e:\n raise AttributeError(e)\n # MARK: Might encounter exception in newer version of pytorch\n # Traceback (most recent call last):\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/queues.py\", line 245, in _feed\n # obj = _ForkingPickler.dumps(obj)\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/reduction.py\", line 51, in dumps\n # cls(buf, protocol).dump(obj)\n # KeyError: '__getstate__'\n # MARK: Because you allow your __getattr__() implementation to raise the wrong kind of exception.\n # FIXME: not working typing hinting code\n __getattr__: Callable[..., 'torch.Tensor'] = __getitem__ # type: ignore # overidden dict.__getitem__\n __getattribute__: Callable[..., 'torch.Tensor'] # type: ignore\n # __getattr__ = dict.__getitem__\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n # TODO: better ways to programmically define these special variables?\n\n @property\n def meta(self) -> dotdict:\n # Special variable used for storing cpu tensor in batch\n if 'meta' not in self:\n self.meta = dotdict()\n return self.__getitem__('meta')\n\n @meta.setter\n def meta(self, meta):\n self.__setitem__('meta', meta)\n\n @property\n def output(self) -> dotdict: # late annotation needed for this\n # Special entry for storing output tensor in batch\n if 'output' not in self:\n self.output = dotdict()\n return self.__getitem__('output')\n\n @output.setter\n 
def output(self, output):\n self.__setitem__('output', output)\n\n @property\n def persistent(self) -> dotdict: # late annotation needed for this\n # Special entry for storing persistent tensor in batch\n if 'persistent' not in self:\n self.persistent = dotdict()\n return self.__getitem__('persistent')\n\n @persistent.setter\n def persistent(self, persistent):\n self.__setitem__('persistent', persistent)\n\n @property\n def type(self) -> str: # late annotation needed for this\n # Special entry for type based construction system\n return self.__getitem__('type')\n\n @type.setter\n def type(self, type):\n self.__setitem__('type', type)\n\n def to_dict(self):\n out = dict()\n for k, v in self.items():\n if isinstance(v, dotdict):\n v = v.to_dict() # recursion point\n out[k] = v\n return out" }, { "identifier": "Camera", "path": "easyvolcap/utils/viewer_utils.py", "snippet": "class Camera:\n # Helper class to manage camera parameters\n def __init__(self,\n H: int = 512,\n W: int = 512,\n K: torch.Tensor = torch.tensor([[512.0, 0.0, 256], [0.0, 512.0, 256.0], [0.0, 0.0, 1.0]]), # intrinsics\n R: torch.Tensor = torch.tensor([[-1.0, 0.0, 0.0,], [0.0, 0.0, -1.0,], [0.0, -1.0, 0.0,]]), # extrinsics\n T: torch.Tensor = torch.tensor([[0.0], [0.0], [-3.0],]), # extrinsics\n n: float = 0.002, # bounds limit\n f: float = 100, # bounds limit\n t: float = 0.0, # temporal dimension (implemented as a float instead of int)\n v: float = 0.0, # view dimension (implemented as a float instead of int)\n bounds: torch.Tensor = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]]), # bounding box\n\n # camera update hyperparameters\n origin: torch.Tensor = torch.tensor([0.0, 0.0, 0.0]),\n world_up: torch.Tensor = torch.tensor([0.0, 0.0, 1.0]),\n movement_speed: float = 1.0, # gui movement speed\n\n batch: dotdict = None, # will ignore all other inputs\n string: str = None, # will ignore all other inputs\n **kwargs,\n ) -> None:\n\n # Batch (network input parameters)\n if string is None:\n if batch is None:\n batch = dotdict()\n batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds = H, W, K, R, T, n, f, t, v, bounds\n self.from_batch(batch)\n \n # Other configurables\n self.origin = vec3(*origin)\n self.world_up = vec3(*world_up)\n self.movement_speed = movement_speed\n # self.front = self.front # will trigger an update\n else:\n self.from_string(string)\n\n # Internal states to facilitate camera position change\n self.is_dragging = False # rotation\n self.about_origin = False # about origin rotation\n self.is_panning = False # translation\n self.lock_fx_fy = True\n\n @property\n def w2p(self):\n ixt = mat4(self.ixt)\n ixt[3, 3] = 0\n ixt[2, 3] = 1\n return ixt @ self.ext # w2c -> c2p = w2p\n\n @property\n def V(self): return self.c2w\n\n @property\n def ixt(self): return self.K\n\n @property\n def gl_ext(self):\n gl_c2w = self.c2w\n gl_c2w[0] *= 1 # flip x\n gl_c2w[1] *= -1 # flip y\n gl_c2w[2] *= -1 # flip z\n gl_ext = glm.affineInverse(gl_c2w)\n return gl_ext # use original opencv ext since we've taken care of the intrinsics in gl_ixt\n\n @property\n def gl_ixt(self):\n # Construct opengl camera matrix with projection & clipping\n # https://fruty.io/2019/08/29/augmented-reality-with-opencv-and-opengl-the-tricky-projection-matrix/\n # https://gist.github.com/davegreenwood/3a32d779f81f08dce32f3bb423672191\n # fmt: off\n gl_ixt = mat4(\n 2 * self.fx / self.W, 0, 0, 0,\n 2 * self.s / self.W, 2 * self.fy / self.H, 0, 0,\n 1 - 2 * (self.cx / self.W), 2 * (self.cy / self.H) - 1, 
(self.f + self.n) / (self.n - self.f), -1,\n 0, 0, 2 * self.f * self.n / (self.n - self.f), 0,\n )\n # fmt: on\n\n return gl_ixt\n\n @property\n def ext(self): return self.w2c\n\n @property\n def w2c(self):\n w2c = mat4(self.R)\n w2c[3] = vec4(*self.T, 1.0)\n return w2c\n\n @property\n def c2w(self):\n return glm.affineInverse(self.w2c)\n\n @property\n def right(self) -> vec3: return vec3(self.R[0, 0], self.R[1, 0], self.R[2, 0]) # c2w R, 0 -> 3,\n\n @property\n def down(self) -> vec3: return vec3(self.R[0, 1], self.R[1, 1], self.R[2, 1]) # c2w R, 1 -> 3,\n\n @property\n def front(self) -> vec3: return vec3(self.R[0, 2], self.R[1, 2], self.R[2, 2]) # c2w R, 2 -> 3,\n\n @front.setter\n def front(self, v: vec3):\n front = v # the last row of R\n self.R[0, 2], self.R[1, 2], self.R[2, 2] = front.x, front.y, front.z\n right = glm.normalize(glm.cross(self.front, self.world_up)) # right\n self.R[0, 0], self.R[1, 0], self.R[2, 0] = right.x, right.y, right.z\n down = glm.cross(self.front, self.right) # down\n self.R[0, 1], self.R[1, 1], self.R[2, 1] = down.x, down.y, down.z\n\n @property\n def center(self): return -glm.transpose(self.R) @ self.T # 3,\n\n @center.setter\n def center(self, v: vec3):\n self.T = -self.R @ v # 3, 1\n\n @property\n def s(self): return self.K[1, 0]\n\n @s.setter\n def s(self, s): self.K[1, 0] = s\n\n @property\n def fx(self): return self.K[0, 0]\n\n @fx.setter\n def fx(self, v: float):\n v = min(v, 1e5)\n v = max(v, 1e-3)\n if self.lock_fx_fy:\n self.K[1, 1] = v / self.K[0, 0] * self.K[1, 1]\n self.K[0, 0] = v\n\n @property\n def fy(self): return self.K[1, 1]\n\n @fy.setter\n def fy(self, v: float):\n if self.lock_fx_fy:\n self.K[0, 0] = v / self.K[1, 1] * self.K[0, 0]\n self.K[1, 1] = v\n\n @property\n def cx(self): return self.K[2, 0]\n\n @cx.setter\n def cx(self, v: float):\n self.K[2, 0] = v\n\n @property\n def cy(self): return self.K[2, 1]\n\n @cy.setter\n def cy(self, v: float):\n self.K[2, 1] = v\n\n def begin_dragging(self,\n x: float, y: float,\n is_panning: bool,\n about_origin: bool,\n ):\n self.is_dragging = True\n self.is_panning = is_panning\n self.about_origin = about_origin\n self.drag_start = vec2([x, y])\n\n # Record internal states # ? 
Will this make a copy?\n self.drag_start_front = self.front # a recording\n self.drag_start_down = self.down\n self.drag_start_right = self.right\n self.drag_start_center = self.center\n self.drag_start_origin = self.origin\n self.drag_start_world_up = self.world_up\n\n # Need to find the max or min delta y to align with world_up\n dot = glm.dot(self.world_up, self.drag_start_front)\n self.drag_ymin = -np.arccos(-dot) + 0.01 # drag up, look down\n self.drag_ymax = np.pi + self.drag_ymin - 0.02 # remove the 0.01 of drag_ymin\n\n def end_dragging(self):\n self.is_dragging = False\n\n def update_dragging(self, x: float, y: float):\n if not self.is_dragging:\n return\n\n current = vec2(x, y)\n delta = current - self.drag_start\n delta /= max(self.H, self.W)\n delta *= -1\n\n if self.is_panning:\n delta *= self.movement_speed\n center_delta = delta[0] * self.drag_start_right + delta[1] * self.drag_start_down\n self.center = self.drag_start_center + center_delta\n if self.about_origin:\n self.origin = self.drag_start_origin + center_delta\n else:\n m = mat4(1.0)\n m = glm.rotate(m, delta.x % 2 * np.pi, self.world_up)\n m = glm.rotate(m, np.clip(delta.y, self.drag_ymin, self.drag_ymax), self.drag_start_right)\n self.front = m @ self.drag_start_front # might overshoot\n\n if self.about_origin:\n self.center = -m @ (self.origin - self.drag_start_center) + self.origin\n\n def move(self, x_offset: float, y_offset: float):\n speed_factor = 1e-1\n movement = y_offset * speed_factor\n movement = movement * self.front * self.movement_speed\n self.center += movement\n\n if self.is_dragging:\n self.drag_start_center += movement\n\n def to_batch(self):\n meta = dotdict()\n meta.H = torch.as_tensor(self.H)\n meta.W = torch.as_tensor(self.W)\n meta.K = torch.as_tensor(self.K.to_list()).mT\n meta.R = torch.as_tensor(self.R.to_list()).mT\n meta.T = torch.as_tensor(self.T.to_list())[..., None]\n meta.n = torch.as_tensor(self.n)\n meta.f = torch.as_tensor(self.f)\n meta.t = torch.as_tensor(self.t)\n meta.v = torch.as_tensor(self.v)\n meta.bounds = torch.as_tensor(self.bounds.to_list()) # no transpose for bounds\n\n # GUI related elements\n meta.movement_speed = torch.as_tensor(self.movement_speed)\n meta.origin = torch.as_tensor(self.origin.to_list())\n meta.world_up = torch.as_tensor(self.world_up.to_list())\n\n batch = dotdict()\n batch.update(meta)\n batch.meta.update(meta)\n return batch\n\n def to_easymocap(self):\n batch = self.to_batch()\n camera = to_numpy(batch)\n return camera\n\n def from_easymocap(self, camera: dict):\n batch = to_tensor(camera)\n self.from_batch(batch)\n return self\n\n def to_string(self) -> str:\n batch = to_list(self.to_batch().meta)\n return json.dumps(batch)\n\n def from_string(self, string: str):\n batch = to_tensor(dotdict(json.loads(string)), ignore_list=True)\n self.from_batch(batch)\n\n def from_batch(self, batch: dotdict):\n H, W, K, R, T, n, f, t, v, bounds = batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds\n\n # Batch (network input parameters)\n self.H = int(H)\n self.W = int(W)\n self.K = mat3(*K.mT.ravel())\n self.R = mat3(*R.mT.ravel())\n self.T = vec3(*T.ravel()) # 3,\n self.n = float(n)\n self.f = float(f)\n self.t = float(t)\n self.v = float(v)\n self.bounds = mat2x3(*bounds.ravel()) # 2, 3\n\n if 'movement_speed' in batch: self.movement_speed = float(batch.movement_speed)\n if 'origin' in batch: self.origin = vec3(*batch.origin.ravel()) # 3,\n if 'world_up' in batch: self.world_up = vec3(*batch.world_up.ravel()) # 3,\n 
return self\n\n def custom_pose(self, R: torch.Tensor, T: torch.Tensor, K: torch.Tensor):\n # self.K = mat3(*K.mT.ravel())\n self.R = mat3(*R.mT.ravel())\n self.T = vec3(*T.ravel())" }, { "identifier": "cm_cpu_store", "path": "easyvolcap/utils/color_utils.py", "snippet": "def colormap(v: torch.Tensor, cm: str = 'virdis'):\ndef colormap_linear(v: torch.Tensor, cm: NoneType = None):\ndef colormap_dict(v: torch.Tensor, cm: torch.Tensor):\ndef colormap_list(v: torch.Tensor, cm: torch.Tensor):\ndef yuv_to_rgb(x):\ndef rgb_to_yuv(x):\ndef image_derivative(img: torch.Tensor, mode='sobel', normalized=True) -> torch.Tensor:\ndef image_pyramid(input: torch.Tensor, max_level: int = 4) -> List[torch.Tensor]:\ndef variance_of_laplacian(img: torch.Tensor):" }, { "identifier": "depth_curve_fn", "path": "easyvolcap/utils/depth_utils.py", "snippet": "def depth_curve_fn(depth: torch.Tensor, p: float = 0.01, cm: str = 'linear'):\n depth = normalize_depth(depth)\n depth = colormap(depth, cm)\n return depth" }, { "identifier": "load_pts", "path": "easyvolcap/utils/data_utils.py", "snippet": "def load_pts(filename: str):\n from pyntcloud import PyntCloud\n cloud = PyntCloud.from_file(filename)\n verts = cloud.xyz\n if 'red' in cloud.points and 'green' in cloud.points and 'blue' in cloud.points:\n r = np.asarray(cloud.points['red'])\n g = np.asarray(cloud.points['green'])\n b = np.asarray(cloud.points['blue'])\n colors = np.stack([r, g, b], axis=-1) / 255\n elif 'r' in cloud.points and 'g' in cloud.points and 'b' in cloud.points:\n r = np.asarray(cloud.points['r'])\n g = np.asarray(cloud.points['g'])\n b = np.asarray(cloud.points['b'])\n colors = np.stack([r, g, b], axis=-1) / 255\n else:\n colors = None\n\n if 'nx' in cloud.points and 'ny' in cloud.points and 'nz' in cloud.points:\n nx = np.asarray(cloud.points['nx'])\n ny = np.asarray(cloud.points['ny'])\n nz = np.asarray(cloud.points['nz'])\n norms = np.stack([nx, ny, nz], axis=-1)\n else:\n norms = None\n\n if 'alpha' in cloud.points:\n cloud.points['alpha'] = cloud.points['alpha'] / 255\n\n reserved = ['x', 'y', 'z', 'red', 'green', 'blue', 'r', 'g', 'b', 'nx', 'ny', 'nz']\n scalars = dotdict({k: np.asarray(cloud.points[k])[..., None] for k in cloud.points if k not in reserved}) # one extra dimension at the back added\n return verts, colors, norms, scalars" }, { "identifier": "load_mesh", "path": "easyvolcap/utils/data_utils.py", "snippet": "def load_mesh(filename: str, device='cuda', load_uv=False, load_aux=False, backend='pytorch3d'):\n from pytorch3d.io import load_ply, load_obj\n if backend == 'trimesh':\n import trimesh\n mesh: trimesh.Trimesh = trimesh.load(filename)\n return mesh.vertices, mesh.faces\n\n vm, fm = None, None\n if filename.endswith('.npz'):\n mesh = np.load(filename)\n v = torch.from_numpy(mesh['verts'])\n f = torch.from_numpy(mesh['faces'])\n\n if load_uv:\n vm = torch.from_numpy(mesh['uvs'])\n fm = torch.from_numpy(mesh['uvfaces'])\n else:\n if filename.endswith('.ply'):\n v, f = load_ply(filename)\n elif filename.endswith('.obj'):\n v, faces_attr, aux = load_obj(filename)\n f = faces_attr.verts_idx\n\n if load_uv:\n vm = aux.verts_uvs\n fm = faces_attr.textures_idx\n else:\n raise NotImplementedError(f'Unrecognized input format for: {filename}')\n\n v = v.to(device, non_blocking=True).contiguous()\n f = f.to(device, non_blocking=True).contiguous()\n\n if load_uv:\n vm = vm.to(device, non_blocking=True).contiguous()\n fm = fm.to(device, non_blocking=True).contiguous()\n\n if load_uv:\n if load_aux:\n return v, f, vm, fm, aux\n 
else:\n return v, f, vm, fm\n else:\n return v, f" }, { "identifier": "to_cuda", "path": "easyvolcap/utils/data_utils.py", "snippet": "def to_cuda(batch, device=\"cuda\", ignore_list: bool = False) -> torch.Tensor:\n if isinstance(batch, (tuple, list)):\n batch = [to_cuda(b, device, ignore_list) for b in batch]\n elif isinstance(batch, dict):\n batch = dotdict({k: (to_cuda(v, device, ignore_list) if k != \"meta\" else v) for k, v in batch.items()})\n elif isinstance(batch, torch.Tensor):\n batch = batch.to(device, non_blocking=True)\n else: # numpy and others\n batch = torch.as_tensor(batch, device=device)\n return batch" }, { "identifier": "prepare_feedback_transform", "path": "easyvolcap/utils/fcds_utils.py", "snippet": "def prepare_feedback_transform(H: int, W: int, K: torch.Tensor, R: torch.Tensor, T: torch.Tensor,\n n: torch.Tensor,\n f: torch.Tensor,\n xyz: torch.Tensor,\n rgb: torch.Tensor,\n rad: torch.Tensor):\n ixt = get_ndc_perspective_matrix(K, H, W, n[..., 0], f[..., 0]).to(xyz.dtype) # to opengl, remove last dim of n and f\n w2c = affine_padding(torch.cat([R, T], dim=-1)).to(xyz.dtype)\n c2w = affine_inverse(w2c)\n c2w[..., 0] *= 1 # flip x\n c2w[..., 1] *= -1 # flip y\n c2w[..., 2] *= -1 # flip z\n ext = affine_inverse(c2w)\n pix_xyz = torch.cat([xyz, torch.ones_like(xyz[..., :1])], dim=-1) @ ext.mT @ ixt.mT\n pix_rad = abs(H * ixt[..., 1, 1][..., None, None] * rad / pix_xyz[..., -1:]) # z: B, 1 * B, N, world space radius -> ndc radius B, N, 1\n\n # Prepare data to be rendered\n data = torch.cat([pix_xyz, rgb, pix_rad], dim=-1).ravel() # organize the data inside vbo\n return data" }, { "identifier": "get_opencv_camera_params", "path": "easyvolcap/utils/fcds_utils.py", "snippet": "def get_opencv_camera_params(batch: dotdict):\n H = batch.meta.H[0].item() # !: BATCH\n W = batch.meta.W[0].item() # !: BATCH\n K = batch.K\n R = batch.R\n T = batch.T\n C = -batch.R.mT @ batch.T # B, 3, 1\n return H, W, K, R, T, C" }, { "identifier": "typed", "path": "easyvolcap/utils/net_utils.py", "snippet": "def typed(input_to: torch.dtype = torch.float, output_to: torch.dtype = torch.float):\n from easyvolcap.utils.data_utils import to_x\n\n def wrapper(func: Callable):\n def inner(*args, **kwargs):\n args = to_x(args, input_to)\n kwargs = to_x(kwargs, input_to)\n ret = func(*args, **kwargs)\n ret = to_x(ret, output_to)\n return ret\n return inner\n return wrapper" }, { "identifier": "multi_gather", "path": "easyvolcap/utils/net_utils.py", "snippet": "def multi_gather(values: torch.Tensor, indices: torch.Tensor, dim=-2):\n # Gather the value at the -2th dim of values, augment index shape on the back\n # Example: values: B, P, 3, index: B, N, -> B, N, 3\n\n # index will first be augmented to match the values' dimentionality at the back\n # take care of batch dimension of, and acts like a linear indexing in the target dimention\n # we assume that the values's second to last dimension is the dimension to be indexed on\n return values.gather(dim, multi_indexing(indices, values.shape, dim))" }, { "identifier": "create_meshgrid", "path": "easyvolcap/utils/net_utils.py", "snippet": "@torch.jit.script\ndef create_meshgrid(H: int, W: int, device: torch.device = torch.device('cuda'), indexing: str = 'ij', ndc: bool = False,\n correct_pix: bool = True, dtype: torch.dtype = torch.float):\n # kornia has meshgrid, but not the best\n i = torch.arange(H, device=device, dtype=dtype)\n j = torch.arange(W, device=device, dtype=dtype)\n if correct_pix:\n i = i + 0.5\n j = j + 0.5\n if ndc:\n i = i / H * 2 - 1\n j 
= j / W * 2 - 1\n ij = torch.meshgrid(i, j, indexing=indexing) # defaults to ij\n ij = torch.stack(ij, dim=-1) # Ht, Wt, 2\n\n return ij" }, { "identifier": "volume_rendering", "path": "easyvolcap/utils/net_utils.py", "snippet": "def volume_rendering(rgb: torch.Tensor, occ: torch.Tensor, bg_brightness: float = 0.0):\n # NOTE: here occ's last dim is not 1, but n_samples\n # rgb: n_batch, n_rays, n_samples, 3\n # occ: n_batch, n_rays, n_samples, 1\n # bg_image: n_batch, n_rays, 3 or None, if this is given as not None, the last sample on the ray will be replaced by this value (assuming this lies on the background)\n # We need to assume:\n # 1. network will find the True geometry, thus giving the background image its real value\n # 2. background image is rendered in a non-static fasion\n # returns:\n # weights: n_batch, n_rays, n_samples\n # rgb_map: n_batch, n_rays, 3\n # acc_map: n_batch, n_rays, 1\n\n weights = render_weights(occ) # (n_batch, n_rays, n_samples)\n rgb_map, acc_map = render_rgb_acc(weights, rgb)\n rgb_map = rgb_map + (1. - acc_map) * bg_brightness\n\n return weights, rgb_map, acc_map" }, { "identifier": "raw2alpha", "path": "easyvolcap/utils/net_utils.py", "snippet": "def raw2alpha(raws: torch.Tensor, dists=0.005, bias=0.0):\n if isinstance(dists, torch.Tensor):\n if dists.ndim == raws.ndim - 1:\n dists = dists[..., None]\n return 1. - torch.exp(-(raws + bias) * dists)" }, { "identifier": "torch_dtype_to_numpy_dtype", "path": "easyvolcap/utils/net_utils.py", "snippet": "def torch_dtype_to_numpy_dtype(torch_dtype):\n mapping = {\n torch.float32: np.float32,\n torch.float64: np.float64,\n torch.int32: np.int32,\n torch.int64: np.int64,\n torch.int16: np.int16,\n torch.uint8: np.uint8,\n torch.int8: np.int8,\n torch.bool: np.bool_\n }\n return mapping.get(torch_dtype, None)" }, { "identifier": "load_pretrained", "path": "easyvolcap/utils/net_utils.py", "snippet": "def load_pretrained(model_dir: str, resume: bool = True, epoch: int = -1, ext: str = '.npz', remove_if_not_resuming: bool = False, warn_if_not_exist: bool = False):\n if not resume: # remove nothing here\n if remove_if_not_resuming:\n if os.path.isdir(model_dir) and len(os.listdir(model_dir)): # only inform the use if there are files\n # log(red(f\"Removing trained weights: {blue(model_dir)}\"))\n try: run(f'rm -r {model_dir}')\n except: pass\n return None, None\n\n if not os.path.exists(model_dir):\n if warn_if_not_exist:\n log(red(f'Pretrained network: {blue(model_dir)} does not exist'))\n return None, None\n if os.path.isdir(model_dir):\n pts = [\n int(pt.split('.')[0]) for pt in os.listdir(model_dir) if pt != f'latest{ext}' and pt.endswith(ext) and pt.split('.')[0].isnumeric()\n ]\n if len(pts) == 0 and f'latest{ext}' not in os.listdir(model_dir):\n return None, None\n if epoch == -1:\n if f'latest{ext}' in os.listdir(model_dir):\n pt = 'latest'\n else:\n pt = max(pts)\n else:\n pt = epoch\n model_path = join(model_dir, f'{pt}{ext}')\n else:\n model_path = model_dir\n\n if ext == '.pt' or ext == '.pth':\n pretrained = dotdict(torch.load(model_path, 'cpu'))\n else:\n from easyvolcap.utils.data_utils import to_tensor\n pretrained = dotdict(model=to_tensor(dict(**np.load(model_path))), epoch=-1) # the npz files do not contain training parameters\n\n return pretrained, model_path" }, { "identifier": "get_bounds", "path": "easyvolcap/utils/net_utils.py", "snippet": "def get_bounds(xyz, padding=0.05): # 5mm padding? 
really?\n # xyz: n_batch, n_points, 3\n\n min_xyz = torch.min(xyz, dim=1)[0] # torch min with dim is ...\n max_xyz = torch.max(xyz, dim=1)[0]\n min_xyz -= padding\n max_xyz += padding\n bounds = torch.stack([min_xyz, max_xyz], dim=1)\n return bounds\n diagonal = bounds[..., 1:] - bounds[..., :1] # n_batch, 1, 3\n bounds[..., 1:] = bounds[..., :1] + torch.ceil(diagonal / voxel_size) * voxel_size # n_batch, 1, 3\n return bounds" }, { "identifier": "CHECK_CUDART_ERROR", "path": "easyvolcap/utils/net_utils.py", "snippet": "def CHECK_CUDART_ERROR(args):\n from cuda import cudart\n\n if isinstance(args, tuple):\n assert len(args) >= 1\n err = args[0]\n if len(args) == 1:\n ret = None\n elif len(args) == 2:\n ret = args[1]\n else:\n ret = args[1:]\n else:\n err = args\n ret = None\n\n assert isinstance(err, cudart.cudaError_t), type(err)\n if err != cudart.cudaError_t.cudaSuccess:\n raise RuntimeError(FORMAT_CUDART_ERROR(err))\n\n return ret" }, { "identifier": "FORMAT_CUDART_ERROR", "path": "easyvolcap/utils/net_utils.py", "snippet": "def FORMAT_CUDART_ERROR(err):\n from cuda import cudart\n return (\n f\"{cudart.cudaGetErrorName(err)[1].decode('utf-8')}({int(err)}): \"\n f\"{cudart.cudaGetErrorString(err)[1].decode('utf-8')}\"\n )" } ]
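The `dotdict` snippet at the top of the context list above is the data-passing container used throughout these utilities: attribute access maps to item access, and `update` recurses into nested dicts while coercing incoming values to the type already stored under the same key. A small usage sketch based only on the behaviour shown in that snippet (import path taken from its `path` field):

from easyvolcap.utils.base_utils import dotdict

batch = dotdict(H=512, W=512, meta=dotdict(frame=0))

# Dot access and item access are interchangeable.
assert batch.H == batch['H'] == 512

# update() recurses into nested dicts instead of replacing them wholesale.
batch.update(dotdict(meta=dotdict(frame=3)))
assert batch.meta.frame == 3

# Incoming values are coerced to the type already stored under the same key.
batch.update(W='1024')
assert batch.W == 1024 and isinstance(batch.W, int)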
from typing import TYPE_CHECKING from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager # must be imported before OpenGL.GL from torch import nn from enum import Enum, auto from os.path import join, dirname from typing import Dict, Union, List from glm import vec2, vec3, vec4, mat3, mat4, mat4x3, mat2x3 # This is actually highly optimized from easyvolcap.utils.console_utils import * from easyvolcap.utils.base_utils import dotdict from easyvolcap.utils.viewer_utils import Camera from easyvolcap.utils.color_utils import cm_cpu_store from easyvolcap.utils.depth_utils import depth_curve_fn from easyvolcap.utils.data_utils import load_pts, load_mesh, to_cuda from easyvolcap.utils.fcds_utils import prepare_feedback_transform, get_opencv_camera_params from easyvolcap.utils.net_utils import typed, multi_gather, create_meshgrid, volume_rendering, raw2alpha, torch_dtype_to_numpy_dtype, load_pretrained, get_bounds from easyvolcap.utils.net_utils import CHECK_CUDART_ERROR, FORMAT_CUDART_ERROR from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager from OpenGL.GL import shaders from pytorch3d.structures import Pointclouds, Meshes from pytorch3d.structures import Pointclouds, Meshes from cuda import cudart from cuda import cudart from cuda import cudart from easyvolcap.engine.registry import call_from_cfg from easyvolcap.utils.gaussian_utils import GaussianModel from cuda import cudart from cuda import cudart from cuda import cudart from cuda import cudart from cuda import cudart from cuda import cudart import os import glm import torch import ctypes import numpy as np import sys import OpenGL.GL as gl
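The import block above deliberately pulls in `eglContextManager` before `OpenGL.GL`, and the module body below clears or keeps `PYOPENGL_PLATFORM` depending on whether a GUI viewer is active. A minimal sketch of the headless setup this implies; `'egl'` is the standard PyOpenGL backend selector, while `EGL_DEVICE_ID` is taken from the comments in this file and may only be honoured by easyvolcap's own EGL helper:

import os

# Choose the EGL backend for PyOpenGL before OpenGL.GL is imported anywhere,
# and pin the GPU used for the headless context (device index is an example).
os.environ.setdefault('PYOPENGL_PLATFORM', 'egl')
os.environ.setdefault('EGL_DEVICE_ID', '0')

import OpenGL.GL as gl  # only now is it safe to import the GL bindings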
12967
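The `volume_rendering` and `raw2alpha` entries in the context list further up follow standard emission-absorption compositing; `render_weights` itself is not included in this dump, so the version below is a conventional standalone sketch of that computation, not the library's implementation:

import torch

def render_weights_sketch(occ: torch.Tensor) -> torch.Tensor:
    # occ: (..., n_samples) per-sample opacity in [0, 1]
    # transmittance before sample i: T_i = prod_{j < i} (1 - occ_j)
    ones = torch.ones_like(occ[..., :1])
    trans = torch.cumprod(torch.cat([ones, 1.0 - occ + 1e-10], dim=-1), dim=-1)[..., :-1]
    return occ * trans  # compositing weights; they sum to at most 1 along each ray

raws = torch.rand(2, 8, 16)               # (batch, rays, samples) raw densities
occ = 1.0 - torch.exp(-raws * 0.005)      # what raw2alpha(raws, dists=0.005) computes with zero bias
weights = render_weights_sketch(occ)
acc_map = weights.sum(dim=-1, keepdim=True)  # accumulated opacity per ray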
print(str(e).encode('utf-8').decode('unicode_escape')) raise e def init_gl_buffers(self, v: int = 0, f: int = 0): if hasattr(self, 'cu_vbo'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo)) super().init_gl_buffers(v, f) # Register vertex buffer obejct flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags)) def init_textures(self): if hasattr(self, 'cu_read_index'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_index)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_index)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower)) if hasattr(self, 'write_fbo'): gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo]) gl.glDeleteTextures(6, [self.write_index, self.write_lower, self.write_attach, self.read_index, self.read_lower, self.read_attach]) self.write_index, self.write_lower, self.write_attach, self.write_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W) self.read_index, self.read_lower, self.read_attach, self.read_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W) # Register image to read from flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly self.cu_read_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_index, gl.GL_TEXTURE_2D, flags)) self.cu_write_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_index, gl.GL_TEXTURE_2D, flags)) self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags)) self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags)) log(f'Created texture of h, w: {self.max_H}, {self.max_W}') def rasterize_generator(self, camera: Camera = None, length: int = None): # some implementation requires no uploading of camera front_fbo, front_index, front_lower = self.read_fbo, self.read_index, self.read_lower back_fbo, back_index, back_lower = self.write_fbo, self.write_index, self.write_lower # Only clear the output once gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo) # for offscreen rendering to textures gl.glClearBufferiv(gl.GL_COLOR, 0, [-1]) gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0]) gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # Only clear the output once gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures gl.glClearBufferiv(gl.GL_COLOR, 0, [-1]) gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0]) gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # Prepare for the actual rendering, previous operations could rebind the vertex array self.use_gl_program(self.splat_program) self.upload_gl_uniforms(camera) gl.glBindVertexArray(self.vao) # The actual multi pass rendering process happens here for pass_index in range(self.pts_per_pix): # Swap buffers to render the next pass front_fbo, front_index, front_lower, back_fbo, back_index, back_lower = \ back_fbo, back_index, back_lower, front_fbo, front_index, front_lower # Bind the read texture and bind the write render frame buffer gl.glBindTextures(0, 2, [front_index, front_lower]) # Move content from write_fbo to screen fbo if pass_index > self.pts_per_pix * self.blit_last_ratio: # no blitting almost has no effect on the rendering gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, 
front_fbo) gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo) gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + 1) gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + 1) gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1]) else: # Only clear the output once gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures # Clear depth buffer for depth testing gl.glClearBufferiv(gl.GL_COLOR, 0, [-1]) # clear the indices buffer for later rendering and retrieving gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # The actual drawing pass with render things out to the write_fbo gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts)) # number of vertices yield back_fbo # give the CUDA end a chance to read from this frame buffer after rendering # Restore states of things gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) gl.glBindVertexArray(0) return def forward(self, xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor, batch: dotdict, return_frags: bool = False, return_full: bool = False, ): """ Get all indices from the depth peeling passes Compute the vertex weight here in torch(cuda) Use the indices to pass through a compositor The backward pass should only be valid on the torch side, and it should've been enough TODO: This function is too memory intensive TODO: Performing IBR is too memory intensive """ # This the slow part, but not differentiable idx, _, _ = self.forward_idx(xyz, rad, batch) # B, H, W, K msk = idx != -1 # B, H, W, K idx = torch.where(msk, idx, 0).long() # Sample things needed for computing screen space weight H, W, K, R, T, C = get_opencv_camera_params(batch) K, R, T, C = K.to(xyz.dtype), R.to(xyz.dtype), T.to(xyz.dtype), C.to(xyz.dtype) pix_xyz = (xyz @ R.mT + T.mT) @ K.mT # B, P, 3 pix_xyz_xy = pix_xyz[..., :-1] / (pix_xyz[..., -1:] + 1e-10) pix_rad = abs(K[..., 1, 1][..., None] * rad[..., 0] / (pix_xyz[..., -1] + 1e-10)) # z: B, 1 * B, N, world space radius
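The `cropped_code` above interleaves OpenGL rasterization with CUDA reads by registering GL objects through `cudart` and mapping them around each render pass. A stripped-down sketch of that register / map / copy / unmap cycle for a vertex buffer, reusing the `CHECK_CUDART_ERROR` helper shown in the context list; the buffer-side `cudaGraphicsResourceGetMappedPointer` call and the `vbo` handle are assumptions beyond what the dump itself shows:

import torch
from cuda import cudart
from easyvolcap.utils.net_utils import CHECK_CUDART_ERROR


def upload_verts_to_vbo(verts: torch.Tensor, vbo: int) -> None:
    # verts: contiguous float32 CUDA tensor; vbo: an already-allocated OpenGL buffer handle.
    flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
    cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(vbo, flags))

    stream = torch.cuda.current_stream().cuda_stream
    CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_vbo, stream))
    ptr, size = CHECK_CUDART_ERROR(cudart.cudaGraphicsResourceGetMappedPointer(cu_vbo))

    # Device-to-device copy from the torch tensor into the mapped GL buffer.
    kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice
    n_bytes = min(size, verts.numel() * verts.element_size())
    CHECK_CUDART_ERROR(cudart.cudaMemcpyAsync(ptr, verts.contiguous().data_ptr(), n_bytes, kind, stream))

    CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_vbo, stream))
    CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(cu_vbo))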
from __future__ import annotations if TYPE_CHECKING: # fmt: off # Environment variable messaging # Need to export EGL_DEVICE_ID before trying to import egl # And we need to consider the case when we're performing distributed training # from easyvolcap.engine import cfg, args # FIXME: GLOBAL IMPORTS if 'easyvolcap.engine' in sys.modules and (sys.modules['easyvolcap.engine'].args.type != 'gui' or sys.modules['easyvolcap.engine'].cfg.viewer_cfg.type == 'UnitySocketViewer'): # FIXME: GLOBAL VARIABLES try: except Exception as e: log(yellow(f'Could not import EGL related modules. {type(e).__name__}: {e}')) os.environ['PYOPENGL_PLATFORM'] = '' try: except Exception as e: print(f'WARNING: OpenGL shaders import error encountered, please install the latest PyOpenGL from github using:') print(f'pip install git+https://github.com/mcfletch/pyopengl') raise e # fmt: on def linearize_depth(d, n: float, f: float): # 0-1 -> -1,1 # ndc -> view return (2.0 * n * f) / (f + n - (d * 2 - 1) * (f - n)) def common_opengl_options(): # Use program point size gl.glEnable(gl.GL_PROGRAM_POINT_SIZE) # Performs face culling gl.glEnable(gl.GL_CULL_FACE) gl.glCullFace(gl.GL_BACK) # Performs alpha trans testing gl.glEnable(gl.GL_ALPHA_TEST) # Performs z-buffer testing gl.glEnable(gl.GL_DEPTH_TEST) # gl.glDepthMask(gl.GL_TRUE) gl.glDepthFunc(gl.GL_LEQUAL) # gl.glDepthRange(-1.0, 1.0) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) # Enable some masking tests gl.glEnable(gl.GL_SCISSOR_TEST) # Enable this to correctly render points # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310 gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory. # # The second argument specifies that our pixels will be in bytes. 
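# A quick sanity check of linearize_depth above (assuming d is a [0, 1] depth-buffer value
# produced by the usual OpenGL projection, with n/f the near/far planes):
#   d = 0:  2nf / (f + n + (f - n)) = 2nf / (2f) = n   -> near plane
#   d = 1:  2nf / (f + n - (f - n)) = 2nf / (2n) = f   -> far plane
# i.e. the helper maps window-space depth back to a positive view-space distance in [n, f].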
# gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1) def load_shader_source(file: str = 'splat.frag'): # Ideally we can just specify the shader name instead of an variable if not exists(file): file = f'{dirname(__file__)}/shaders/{file}' if not exists(file): file = file.replace('shaders/', '') if not exists(file): raise RuntimeError(f'Shader file: {file} does not exist') with open(file, 'r') as f: return f.read() def use_gl_program(program: Union[shaders.ShaderProgram, dict]): if isinstance(program, dict): # Recompile the program if the user supplied sources program = dotdict(program) program = shaders.compileProgram( shaders.compileShader(program.VERT_SHADER_SRC, gl.GL_VERTEX_SHADER), shaders.compileShader(program.FRAG_SHADER_SRC, gl.GL_FRAGMENT_SHADER) ) return gl.glUseProgram(program) class Mesh: class RenderType(Enum): POINTS = 1 LINES = 2 TRIS = 3 QUADS = 4 # TODO: Support quad loading STRIPS = 5 # Helper class to render a mesh on opengl # This implementation should only be used for debug visualization # Since no differentiable mechanism will be added # We recommend using nvdiffrast and pytorch3d's point renderer directly if you will to optimize these structures directly def __init__(self, verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]), # need to call update after update faces: torch.Tensor = torch.tensor([[0, 1, 2]]), # need to call update after update colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict(), render_type: RenderType = RenderType.TRIS, # Misc info name: str = 'mesh', filename: str = '', visible: bool = True, # Render options shade_flat: bool = False, # smooth shading point_radius: float = 0.015, render_normal: bool = False, # Storage options store_device: str = 'cpu', compute_device: str = 'cuda', vert_sizes=[3, 3, 3], # pos + color + norm # Init options est_normal_thresh: int = 100000, # Ignore unused input **kwargs, ) -> None: super().__init__() self.name = name self.visible = visible self.render_type = render_type self.shade_flat = shade_flat self.point_radius = point_radius self.render_normal = render_normal self.store_device = store_device self.compute_device = compute_device self.vert_sizes = vert_sizes self.est_normal_thresh = est_normal_thresh # Uniform and program self.compile_shaders() self.uniforms = dotdict() # uniform values # Before initialization self.max_verts = 0 self.max_faces = 0 # OpenGL data if filename: self.load_from_file(filename) else: self.load_from_data(verts, faces, colors, normals, scalars) def compile_shaders(self): try: self.mesh_program = shaders.compileProgram( shaders.compileShader(load_shader_source('mesh.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('mesh.frag'), gl.GL_FRAGMENT_SHADER) ) self.point_program = shaders.compileProgram( shaders.compileShader(load_shader_source('point.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('point.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e @property def n_verts_bytes(self): return len(self.verts) * self.vert_size * self.verts.element_size() @property def n_faces_bytes(self): return len(self.faces) * self.face_size * self.faces.element_size() @property def verts_data(self): # a heavy copy operation verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts @property def faces_data(self): # a heavy copy 
operation faces = self.faces.ravel().numpy() # N, 3 faces = np.asarray(faces, dtype=np.uint32, order='C') return faces @property def face_size(self): return self.render_type.value @property def vert_size(self): return sum(self.vert_sizes) def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'): verts, faces, colors, normals, scalars = self.load_data_from_file(filename) self.load_from_data(verts, faces, colors, normals, scalars) def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'): self.name = os.path.split(filename)[-1] verts, faces, colors, normals, scalars = None, None, None, None, None verts, faces = load_mesh(filename, device=self.store_device) if not len(faces): verts, colors, normals, scalars = load_pts(filename) self.render_type = Mesh.RenderType.POINTS else: self.render_type = Mesh.RenderType(faces.shape[-1]) # use value return verts, faces, colors, normals, scalars def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict()): # Data type conversion verts = torch.as_tensor(verts) # convert to tensor if input is of other types if verts.dtype == torch.float32: pass # supports this for now elif verts.dtype == torch.float16: pass # supports this for now else: verts = verts.type(torch.float) # convert to float32 if input is of higher precision gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT self.vert_gl_types = [gl_dtype] * len(self.vert_sizes) # Prepare main mesh data: vertices and faces self.verts = torch.as_tensor(verts, device=self.store_device) self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32) # NOTE: No uint32 support # Prepare colors and normals if colors is not None: self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype) else: bounds = get_bounds(self.verts[None])[0] self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0]) if normals is not None: self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype) else: self.estimate_vertex_normals() # Prepare other scalars if scalars is not None: for k, v in scalars.items(): setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype)) # is this ok? 
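# Note on the interleaved layout prepared here: with vert_sizes = [3, 3, 3] (position, color,
# normal) and float32 storage, each vertex occupies 9 * 4 = 36 bytes; init_gl_buffers further
# down configures glVertexAttribPointer with that 36-byte stride and byte offsets 0 / 12 / 24
# for the three attributes (stride and offsets halve if half-precision vertices are used).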
# Prepare OpenGL related buffer self.update_gl_buffers() def estimate_vertex_normals(self): def est_pcd_norms(): if self.verts.dtype == torch.half: self.normals = self.verts else: pcd = Pointclouds([self.verts]).to(self.compute_device) self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype) # no batch dim def est_tri_norms(): if self.verts.dtype == torch.half: self.normals = self.verts else: mesh = Meshes([self.verts], [self.faces]).to(self.compute_device) self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype) # no batch dim if not len(self.verts) > self.est_normal_thresh: if self.render_type == Mesh.RenderType.TRIS: est_tri_norms() elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms() else: # log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping')) self.normals = self.verts else: # log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation')) self.normals = self.verts def offscreen_render(self, eglctx: "eglContextManager", camera: Camera): eglctx.resize(camera.W, camera.H) self.render(camera) def render(self, camera: Camera): if not self.visible: return # For point rendering if self.render_type == Mesh.RenderType.POINTS: gl.glUseProgram(self.point_program) self.use_gl_program(self.point_program) else: gl.glUseProgram(self.mesh_program) self.use_gl_program(self.mesh_program) self.upload_gl_uniforms(camera) gl.glBindVertexArray(self.vao) if self.render_type == Mesh.RenderType.POINTS: gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts)) # number of vertices elif self.render_type == Mesh.RenderType.LINES: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.TRIS: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.QUADS: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.STRIPS: gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) else: raise NotImplementedError gl.glBindVertexArray(0) def use_gl_program(self, program: shaders.ShaderProgram): use_gl_program(program) self.uniforms.shade_flat = gl.glGetUniformLocation(program, "shade_flat") self.uniforms.point_radius = gl.glGetUniformLocation(program, "point_radius") self.uniforms.render_normal = gl.glGetUniformLocation(program, "render_normal") self.uniforms.H = gl.glGetUniformLocation(program, "H") self.uniforms.W = gl.glGetUniformLocation(program, "W") self.uniforms.n = gl.glGetUniformLocation(program, "n") self.uniforms.f = gl.glGetUniformLocation(program, "f") self.uniforms.P = gl.glGetUniformLocation(program, "P") self.uniforms.K = gl.glGetUniformLocation(program, "K") self.uniforms.V = gl.glGetUniformLocation(program, "V") self.uniforms.M = gl.glGetUniformLocation(program, "M") def upload_gl_uniforms(self, camera: Camera): K = camera.gl_ixt # hold the reference V = camera.gl_ext # hold the reference M = glm.identity(mat4) P = K * V * M gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat) gl.glUniform1f(self.uniforms.point_radius, self.point_radius) gl.glUniform1i(self.uniforms.render_normal, 
self.render_normal) gl.glUniform1i(self.uniforms.H, camera.H) # o2w gl.glUniform1i(self.uniforms.W, camera.W) # o2w gl.glUniform1f(self.uniforms.n, camera.n) # o2w gl.glUniform1f(self.uniforms.f, camera.f) # o2w gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P)) # o2clip gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K)) # c2clip gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, glm.value_ptr(V)) # w2c gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M)) # o2w def update_gl_buffers(self): # Might be overwritten self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0, len(self.faces) if hasattr(self, 'faces') else 0) # maybe repeated if hasattr(self, 'verts'): gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo) gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data) # hold the reference if hasattr(self, 'faces'): gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data) def resize_buffers(self, v: int = 0, f: int = 0): if v > self.max_verts or f > self.max_faces: if v > self.max_verts: self.max_verts = v if f > self.max_faces: self.max_faces = f self.init_gl_buffers(v, f) def init_gl_buffers(self, v: int = 0, f: int = 0): # This will only init the corresponding buffer object n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes # Housekeeping if hasattr(self, 'vao'): gl.glDeleteVertexArrays(1, [self.vao]) gl.glDeleteBuffers(2, [self.vbo, self.ebo]) self.vao = gl.glGenVertexArrays(1) self.vbo = gl.glGenBuffers(1) self.ebo = gl.glGenBuffers(1) gl.glBindVertexArray(self.vao) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo) gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) # NOTE: Using pointers here won't work # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao cumsum = 0 for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)): gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size())) # we use 32 bit float gl.glEnableVertexAttribArray(i) cumsum += s if n_faces_bytes > 0: # Some implementation has no faces, we dangerously ignore ebo here, assuming they will never be used gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) gl.glBindVertexArray(0) def render_imgui(self): pass class Quad(Mesh): # A shared texture for CUDA (pytorch) and OpenGL # Could be rendererd to screen using blitting or just drawing a quad def __init__(self, H: int = 256, W: int = 256, use_cudagl: bool = True, compose: bool = False, compose_power: float = 1.0): # the texture to blip self.use_cudagl = use_cudagl self.vert_sizes = [3] # only position self.vert_gl_types = [gl.GL_FLOAT] # only position self.render_type = Mesh.RenderType.STRIPS # remove side effects of settings _type self.max_verts, self.max_faces = 0, 0 self.verts = torch.as_tensor([[-1., -1., 0.5], [1., -1., 0.5], [-1., 1., 0.5], [1., 1., 0.5],]) self.update_gl_buffers() self.compile_shaders() self.max_H, self.max_W = H, W self.H, self.W = H, W self.compose = compose self.compose_power = compose_power self.init_texture() @property def n_faces_bytes(self): return 0 def use_gl_program(self, 
program: shaders.ShaderProgram): super().use_gl_program(program) self.uniforms.tex = gl.glGetUniformLocation(program, 'tex') gl.glUseProgram(self.quad_program) # use a different program gl.glUniform1i(self.uniforms.tex, 0) def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def resize_textures(self, H: int, W: int): # analogy to update_gl_buffers self.H, self.W = H, W if self.H > self.max_H or self.W > self.max_W: # max got updated self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W) self.init_texture() def init_texture(self): if hasattr(self, 'cu_tex'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex)) if hasattr(self, 'fbo'): gl.glDeleteFramebuffers(1, [self.fbo]) gl.glDeleteTextures(1, [self.tex]) # Init the texture to be blit onto the screen self.tex = gl.glGenTextures(1) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0)) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Init the framebuffer object if explicit blitting is used (slower than drawing quad) self.fbo = gl.glGenFramebuffers(1) old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo) if self.use_cudagl: if self.compose: # Both reading and writing of this resource is required flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone else: flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags)) def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0): assert self.use_cudagl, "Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad" w = w or self.W h = h or self.H if image.shape[-1] == 3: image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1) # add alpha channel kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream)) cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0)) if self.compose: """ Blit current framebuffer to this texture (self.tex) Read content of this texture into a cuda buffer Perform alpha blending based on the frame's alpha channel Copy the blended image back into the texture (self.tex) """ old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo) # read buffer defaults to 0 gl.glBlitFramebuffer(x, y, w, h, x, y, w, h, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) # now self.tex contains the content of the already rendered frame gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old) buffer = torch.empty_like(image) CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(), # dst w * 4 * buffer.element_size(), # dpitch cu_tex_arr, # 
src x * 4 * image.element_size(), # wOffset y, # hOffset w * 4 * buffer.element_size(), # width Width of matrix transfer (columns in bytes) h, # height kind, # kind torch.cuda.current_stream().cuda_stream)) # stream # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]]) alpha = image[..., -1:] / 255 image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha # storing float into int image[..., -1:] = buffer[..., -1:] + image[..., -1:] image = image.clip(0, 255) CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr, x * 4 * image.element_size(), y, image.data_ptr(), w * 4 * image.element_size(), # differently sized w * 4 * image.element_size(), # rgba, should do a composition first h, kind, torch.cuda.current_stream().cuda_stream)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream)) def upload_to_texture(self, ptr: np.ndarray): H, W = ptr.shape[:2] H, W = min(self.H, H), min(self.W, W) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, W, H, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[:H, :W]) # to gpu, might slow down? @property def verts_data(self): # a heavy copy operation verts = self.verts.ravel().detach().cpu().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts def render(self, camera: Camera = None): self.draw() # no uploading needed def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0): """ Upload the texture instead of the camera This respects the OpenGL convension of lower left corners """ w = w or self.W h = h or self.H _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT) gl.glViewport(x, y, w, h) gl.glScissor(x, y, w, h) # only render in this small region of the viewport gl.glUseProgram(self.quad_program) # use a different program gl.glActiveTexture(gl.GL_TEXTURE0) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glBindVertexArray(self.vao) gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) gl.glBindVertexArray(0) # Some house keepings gl.glViewport(0, 0, W, H) gl.glScissor(0, 0, W, H) def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0): """ This respects the OpenGL convension of lower left corners """ w = w or self.W h = h or self.H old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo) # write buffer defaults to 0 gl.glBlitFramebuffer(x, y, x + w, y + h, # the height is flipped x, y, x + w, y + h, # the height is flipped gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old) class UQuad(Mesh): """ Responsible for initializing textures with a single value or blitting a texture to a framebuffer (possibly better done with blit instead of quad drawing) Effectively clearing the texture for real, see: https://stackoverflow.com/questions/37335281/is-glcleargl-color-buffer-bit-preferred-before-a-whole-frame-buffer-overwritte """ def __init__(self): self.n_blit_values = 3 self.vert_sizes = [3] # only position self.vert_gl_types = [gl.GL_FLOAT] # only position self.max_verts, self.max_faces = 0, 0 self.verts = torch.as_tensor([[-1., -1., 0.5], [1., -1., 0.5], [-1., 1., 0.5], [1., 1., 0.5],]) self.compile_shaders() self.uniforms = dotdict() # uniform values self.use_gl_programs(self.quad_program) self.update_gl_buffers() @property def n_faces_bytes(self): return 0 @property def verts_data(self): # a heavy copy operation verts = self.verts.ravel().detach().cpu().numpy() # MARK: 
Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts def use_gl_programs(self, program: shaders.ShaderProgram): for i in range(self.n_blit_values): self.uniforms[f'value{i}'] = gl.glGetUniformLocation(program, f'value{i}') for i in range(self.n_blit_values): self.uniforms[f'use_tex{i}'] = gl.glGetUniformLocation(program, f'use_tex{i}') gl.glUseProgram(self.program) # use a different program for i in range(self.n_blit_values): self.uniforms[f'tex{i}'] = gl.glGetUniformLocation(program, f'tex{i}') gl.glUniform1i(self.uniforms[f'tex{i}'], i) def upload_gl_uniforms(self, values: List[List[float]], use_texs: List[bool]): for i, v in enumerate(values): v = vec4(v) # HACK: Hold the reference for this upload gl.glUniform4fv(self.uniforms[f'value{i}'], 1, glm.value_ptr(v)) # as float array for i, v in enumerate(use_texs): gl.glUniform1i(self.uniforms[f'use_tex{i}'], v) def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('uquad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('uquad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def draw(self, values: List[List[float]] = [], use_texs=[]): """ This function will render 'value' to the currently bound framebuffer, up to six outputs """ old_prog = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM) old_vao = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING) gl.glUseProgram(self.quad_program) self.upload_gl_uniforms(values, use_texs) # should be a noop # Prepare to render to textures gl.glBindVertexArray(self.vao) gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) # number of vertices gl.glBindVertexArray(old_vao) gl.glUseProgram(old_prog) class DQuad(UQuad): def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('dquad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('dquad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def draw(self, values: List[List[float]] = [], use_texs=[]): old_function = gl.glGetIntegerv(gl.GL_DEPTH_FUNC) gl.glDepthFunc(gl.GL_ALWAYS) super().draw(values, use_texs) gl.glDepthFunc(old_function) def hardware_rendering_framebuffer(H: int, W: int, gl_tex_dtype=gl.GL_RGBA16F): # Prepare for write frame buffers color_buffer = gl.glGenTextures(1) depth_upper = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, storereference in fb # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter). 
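# Illustrative note (added comment, not part of the original source): for the calls below,
# the internal format selects the on-GPU storage (e.g. gl.GL_RGBA16F or gl.GL_R32F), while
# the format/type pair describes the client-side pixel data that would be uploaded
# (gl.GL_RGBA + gl.GL_FLOAT, gl.GL_RED + gl.GL_FLOAT); passing a null pointer as the data
# argument only allocates storage without uploading anything.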
gl.glBindTexture(gl.GL_TEXTURE_2D, color_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl_tex_dtype, W, H, 0, gl.GL_RGBA, gl.GL_FLOAT, ctypes.c_void_p(0)) # 16 * 4 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_upper) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, color_buffer, 0) # location 0 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_upper, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT2, gl.GL_TEXTURE_2D, depth_lower, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return color_buffer, depth_upper, depth_lower, depth_attach, fbo def hareward_peeling_framebuffer(H: int, W: int): # Prepare for write frame buffers index_buffer = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, storereference in fb # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter). 
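# Illustrative note (added comment, not part of the original source): the index attachment
# below uses an integer format (gl.GL_R32I with gl.GL_RED_INTEGER + gl.GL_INT), so it must be
# cleared with glClearBufferiv rather than glClearBufferfv and is read back as int32 on the
# CUDA side; -1 is later used as the "no fragment" sentinel when masking gathered indices.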
gl.glBindTexture(gl.GL_TEXTURE_2D, index_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32I, W, H, 0, gl.GL_RED_INTEGER, gl.GL_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return index_buffer, depth_lower, depth_attach, fbo class Gaussian(Mesh): def __init__(self, filename: str = 'assets/meshes/zju3dv.npz', gaussian_cfg: dotdict = dotdict(), quad_cfg: dotdict = dotdict(), render_depth: bool = False, # show depth or show color dpt_cm: str = 'linear', H: int = 1024, W: int = 1024, **kwargs, ): # Import Gaussian Model # Housekeeping super().__init__(**kwargs) self.name = split(filename)[-1] # Init Gaussian related models, for now only the first gaussian model is supported if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'): # Load from GaussianTSampler pretrained, _ = load_pretrained(filename) # loaded model and updated path (maybe) pretrained = pretrained.model state_dict = dotdict() for k, v in pretrained.items(): if k.startswith('sampler.pcds.0'): state_dict[k.replace('sampler.pcds.0.', '')] = v # Load the parameters into the gaussian model self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg) # init empty gaussian model self.gaussian_model.load_state_dict(state_dict) # load the first gaussian model self.gaussian_model.cuda() # move the parameters to GPU elif filename.endswith('.ply'): # Load raw GaussianModel pass else: raise NotImplementedError # Init rendering quad self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W) # Other configurations self.render_depth = render_depth self.dpt_cm = dpt_cm # Disabling initialization def load_from_file(self, *args, **kwargs): pass def load_from_data(self, *args, **kwargs): pass def compile_shaders(self): pass def update_gl_buffers(self): pass def resize_textures(self, H: int, W: int): self.quad.resize_textures(H, W) # The actual rendering function @torch.no_grad() def render(self, camera: Camera): # Perform actual gaussian rendering batch = to_cuda(camera.to_batch()) rgb, 
acc, dpt = self.gaussian_model.render(batch) if self.render_depth: rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1) # H, W, 4 else: rgba = torch.cat([rgb, acc], dim=-1) # H, W, 4 # Copy rendered tensor to screen rgba = (rgba.clip(0, 1) * 255).type(torch.uint8).flip(0) # transform self.quad.copy_to_texture(rgba) self.quad.render() class Splat(Mesh): def __init__(self, *args, H: int = 512, W: int = 512, tex_dtype: str = torch.half, pts_per_pix: int = 24, # render less for the static background since we're only doing a demo blit_last_ratio: float = 0.0, volume_rendering: bool = True, radii_mult_volume: float = 1.00, # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better radii_mult_solid: float = 0.85, # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better point_smooth: bool = True, alpha_blending: bool = True, **kwargs): kwargs = dotdict(kwargs) kwargs.vert_sizes = kwargs.get('vert_sizes', [3, 3, 1, 1]) self.tex_dtype = getattr(torch, tex_dtype) if isinstance(tex_dtype, str) else tex_dtype self.gl_tex_dtype = gl.GL_RGBA16F if self.tex_dtype == torch.half else gl.GL_RGBA32F super().__init__(*args, **kwargs) self.use_gl_program(self.splat_program) self.pts_per_pix = pts_per_pix self.blit_last_ratio = blit_last_ratio self.volume_rendering = volume_rendering self.radii_mult_volume = radii_mult_volume self.radii_mult_solid = radii_mult_solid self.point_smooth = point_smooth self.alpha_blending = alpha_blending self.max_H, self.max_W = H, W self.H, self.W = H, W self.init_textures() @property def verts_data(self): # a heavy copy operation verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') # this should only be invoked once return verts def use_gl_program(self, program: shaders.ShaderProgram): super().use_gl_program(program) # Special controlling variables self.uniforms.alpha_blending = gl.glGetUniformLocation(program, f'alpha_blending') self.uniforms.point_smooth = gl.glGetUniformLocation(program, f'point_smooth') self.uniforms.radii_mult = gl.glGetUniformLocation(program, f'radii_mult') # Special rendering variables self.uniforms.pass_index = gl.glGetUniformLocation(program, f'pass_index') self.uniforms.read_color = gl.glGetUniformLocation(program, f'read_color') self.uniforms.read_upper = gl.glGetUniformLocation(program, f'read_upper') self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower') gl.glUniform1i(self.uniforms.read_color, 0) gl.glUniform1i(self.uniforms.read_upper, 1) gl.glUniform1i(self.uniforms.read_lower, 2) def compile_shaders(self): try: self.splat_program = shaders.compileProgram( shaders.compileShader(load_shader_source('splat.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('splat.frag'), gl.GL_FRAGMENT_SHADER) ) self.usplat_program = shaders.compileProgram( shaders.compileShader(load_shader_source('usplat.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('usplat.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def rasterize(self, camera: Camera = None, length: int = None): if self.volume_rendering: return self.rasterize_volume(camera, length) else: return self.rasterize_solid(camera, length) def rasterize_volume(self, camera: Camera = None, length: int = None): # some implementation requires no uploading of 
camera """ Let's try to analyze what's happening here We want to: 1. Render the front-most color to color buffer 2. UNUSED: Render the front-most depth + some large margin to a depth upper limit buffer 3. Render the front-most depth + some small margin to a depth lower limit buffer 4. Switch between the render target and sampling target 5. Use the previous rendered color, depth upper limit and lower limit as textures 6. When current depth is smaller than the lower limit, we've already rendered this in the first pass, discard 7. UNUSED: When current depth is larger than the upper limit, it will probabily not contribute much to final results, discard 8. UNUSED: When the accumulated opacity reaches almost 1, subsequent rendering would not have much effect, return directly 9. When the point coordinates falls out of bound of the current sphere, dicard (this could be optimized with finutining in rectangle) 10. Finally, try to render the final color using the volume rendering equation (by accumulating alpha values from front to back) Required cleanup checklist: 1. Before rendering the first pass, we need to clear the color and depth texture, this is not done, need to check multi-frame accumulation on this 2. Before rendering next pass, it's also recommended to blit color and depth values from previous pass to avoid assign them in the shader """ front_fbo, front_color, front_upper, front_lower = self.read_fbo, self.read_color, self.read_upper, self.read_lower back_fbo, back_color, back_upper, back_lower = self.write_fbo, self.write_color, self.write_upper, self.write_lower # Only clear the output once gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo) # for offscreen rendering to textures gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0]) # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9]) gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0]) gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # Only clear the output once gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0]) # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9]) gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0]) gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # Prepare for the actual rendering, previous operations could rebind the vertex array self.use_gl_program(self.splat_program) # TODO: Implement this with a mapping and a lazy modification self.upload_gl_uniforms(camera) gl.glBindVertexArray(self.vao) # The actual multi pass rendering process happens here for pass_index in range(self.pts_per_pix): # Swap buffers to render the next pass front_fbo, front_color, front_upper, front_lower, back_fbo, back_color, back_upper, back_lower = \ back_fbo, back_color, back_upper, back_lower, front_fbo, front_color, front_upper, front_lower # Bind the read texture and bind the write render frame buffer gl.glBindTextures(0, 3, [front_color, front_upper, front_lower]) # Move content from write_fbo to screen fbo if pass_index > self.pts_per_pix * self.blit_last_ratio: # no blitting almost has no effect on the rendering gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo) gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo) for i in range(3): gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + i) gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + i) gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, 
gl.GL_COLOR_ATTACHMENT2]) # Clear depth buffer for depth testing gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing gl.glUniform1i(self.uniforms.pass_index, pass_index) # pass index # The actual drawing pass with render things out to the write_fbo gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts)) # number of vertices # Restore states of things gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) gl.glBindVertexArray(0) return back_fbo def upload_gl_uniforms(self, camera: Camera): super().upload_gl_uniforms(camera) gl.glUniform1i(self.uniforms.point_smooth, self.point_smooth) gl.glUniform1i(self.uniforms.alpha_blending, self.alpha_blending) if self.volume_rendering: gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_volume) # radii mult else: gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_solid) # radii mult def rasterize_solid(self, camera: Camera = None, length: int = None): # Only clear the output once back_fbo = self.write_fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0]) # color # gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0]) # depth upper gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0]) # depth lower gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # Prepare for the actual rendering, previous operations could rebind the vertex array self.use_gl_program(self.usplat_program) self.upload_gl_uniforms(camera) gl.glUniform1i(self.uniforms.pass_index, 0) # pass index gl.glBindVertexArray(self.vao) # The actual drawing pass with render things out to the write_fbo gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts)) # number of vertices # Restore states of things gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) gl.glBindVertexArray(0) return back_fbo def show(self, back_fbo: int): # Move content from write_fbo to screen fbo gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, back_fbo) gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, 0) # render the final content onto screen gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0) gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) def render(self, camera): if not self.visible: return self.show(self.rasterize(camera)) def resize_textures(self, H: int, W: int): # analogy to update_gl_buffers self.H, self.W = H, W if self.H > self.max_H or self.W > self.max_W: # max got updated self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W) self.init_textures() def init_textures(self): if hasattr(self, 'write_fbo'): gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo]) gl.glDeleteTextures(8, [self.write_color, self.write_upper, self.write_lower, self.write_attach, self.read_color, self.read_upper, self.read_lower, self.read_attach]) self.write_color, self.write_upper, self.write_lower, self.write_attach, self.write_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype) self.read_color, self.read_upper, self.read_lower, self.read_attach, self.read_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype) log(f'Created texture of h, w: {self.max_H}, {self.max_W}') class HardwareRendering(Splat): def __init__(self, dtype=torch.half, **kwargs, ): self.dtype = getattr(torch, dtype) if isinstance(dtype, str) 
else dtype self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT super().__init__(**kwargs, blit_last_ratio=0.90, vert_sizes=[3, 3, 1, 1], ) # verts, color, radius, alpha @property def verts_data(self): # a heavy copy operation verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=torch_dtype_to_numpy_dtype(self.dtype), order='C') # this should only be invoked once return verts def init_gl_buffers(self, v: int = 0, f: int = 0): if hasattr(self, 'cu_vbo'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo)) super().init_gl_buffers(v, f) # Register vertex buffer obejct flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags)) def init_textures(self): if hasattr(self, 'cu_read_color'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_color)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_color)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower)) super().init_textures() # Register image to read from flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly self.cu_read_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_color, gl.GL_TEXTURE_2D, flags)) self.cu_write_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_color, gl.GL_TEXTURE_2D, flags)) self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags)) self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags)) def forward(self, xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor, batch: dotdict): """ Renders a 3D point cloud using OpenGL and returns the rendered RGB image, accumulated alpha image, and depth map. Args: xyz (torch.Tensor): A tensor of shape (B, N, 3) containing the 3D coordinates of the points. rgb (torch.Tensor): A tensor of shape (B, N, 3) containing the RGB color values of the points. rad (torch.Tensor): A tensor of shape (B, N, 1) containing the radii of the points. batch (dotdict): A dictionary containing the camera parameters and other metadata for the batch. Returns: A tuple containing the rendered RGB image, accumulated alpha image, and depth map, all as torch.Tensors. The RGB image has shape (1, H, W, 3), the alpha image has shape (1, H, W, 1), and the depth map has shape (1, H, W, 1). The method first resizes the OpenGL texture to match the height and width of the output image. It then sets the OpenGL viewport and scissor to only render in the region of the viewport specified by the output image size. It concatenates the `xyz`, `rgb`, and `rad` tensors along the last dimension and flattens the result into a 1D tensor. The method then uploads the input data to OpenGL for rendering and performs depth peeling using OpenGL. The method uploads the camera parameters to OpenGL and renders the point cloud, saving the output buffer to the `back_fbo` attribute of the class. Finally, the method copies the rendered image and depth back to the CPU as torch.Tensors and reshapes them to match the output image size. 
The RGB image is returned with shape (1, H, W, 3), the accumulated alpha image is returned with shape (1, H, W, 1), and the depth map is returned with shape (1, H, W, 1). """ kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice # !: BATCH H, W = batch.meta.H[0].item(), batch.meta.W[0].item() self.resize_textures(H, W) # maybe resize the texture self.resize_buffers(xyz.shape[1]) # maybe resize the buffer _, _, old_W, old_H = gl.glGetIntegerv(gl.GL_VIEWPORT) gl.glViewport(0, 0, W, H) gl.glScissor(0, 0, W, H) # only render in this small region of the viewport # Prepare for input data data = torch.cat([xyz, rgb, rad, occ], dim=-1).type(self.dtype).ravel() # Upload to opengl for rendering CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream)) cu_vbo_ptr, cu_vbo_size = CHECK_CUDART_ERROR(cudart.cudaGraphicsResourceGetMappedPointer(self.cu_vbo)) assert cu_vbo_size >= data.numel() * data.element_size(), f'PyTorch(CUDA) and OpenGL vertex buffer size mismatch ({data.numel() * data.element_size()} v.s. {cu_vbo_size}), CUDA side should be less than or equal to the OpenGL side' CHECK_CUDART_ERROR(cudart.cudaMemcpyAsync(cu_vbo_ptr, data.data_ptr(), data.numel() * data.element_size(), kind, torch.cuda.current_stream().cuda_stream)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream)) # Perform rasterization (depth peeling using OpenGL) if 'meta_stream' in batch.meta: batch.meta.meta_stream.synchronize() # wait for gpu -> cpu copy to finish back_fbo = self.rasterize(Camera(batch=batch.meta), xyz.shape[-2]) # will upload and render, save output buffer to back_fbo # Copy rendered image and depth back as tensor cu_tex = self.cu_write_color if back_fbo == self.write_fbo else self.cu_read_color # double buffered depth peeling cu_dpt = self.cu_write_lower if back_fbo == self.write_fbo else self.cu_read_lower # double buffered depth peeling # Prepare the output # !: BATCH rgb_map = torch.empty((H, W, 4), dtype=self.tex_dtype, device='cuda') # to hold the data from opengl dpt_map = torch.empty((H, W, 1), dtype=torch.float, device='cuda') # to hold the data from opengl # The resources in resources may be accessed by CUDA until they are unmapped. # The graphics API from which resources were registered should not access any resources while they are mapped by CUDA. # If an application does so, the results are undefined. 
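# Descriptive summary (added comment, not in the original source): the block below maps the
# two registered GL textures for CUDA access, fetches their level-0 array handles, issues
# async 2D copies into the preallocated rgb_map/dpt_map tensors (pitch = W * channels *
# element_size), and finally unmaps the resources so OpenGL may touch the textures again;
# the unmap calls are also the synchronization points flagged by the "MARK: SYNC" comments.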
CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream)) CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream)) cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_tex, 0, 0)) cu_dpt_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_dpt, 0, 0)) CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(rgb_map.data_ptr(), # dst W * 4 * rgb_map.element_size(), # dpitch cu_tex_arr, # src 0, # wOffset 0, # hOffset W * 4 * rgb_map.element_size(), # width Width of matrix transfer (columns in bytes) H, # height kind, # kind torch.cuda.current_stream().cuda_stream)) # stream CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(dpt_map.data_ptr(), W * 1 * dpt_map.element_size(), cu_dpt_arr, 0, 0, W * 1 * dpt_map.element_size(), H, kind, torch.cuda.current_stream().cuda_stream)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream)) # MARK: SYNC CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream)) # MARK: SYNC # Ouput reshaping rgb_map, dpt_map = rgb_map[None].flip(1), dpt_map[None].flip(1) rgb_map, acc_map = rgb_map[..., :3], rgb_map[..., 3:] dpt_map = torch.where(dpt_map == 0, dpt_map.max(), dpt_map) # Some house keepings gl.glViewport(0, 0, old_W, old_H) gl.glScissor(0, 0, old_W, old_H) return rgb_map, acc_map, dpt_map class HardwarePeeling(Splat): def __init__(self, dtype=torch.float, **kwargs): self.dtype = getattr(torch, dtype) if isinstance(dtype, str) else dtype self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT super().__init__(**kwargs, blit_last_ratio=-10.0, vert_sizes=[3, 1], ) # verts, radius, index # from pytorch3d.renderer import AlphaCompositor # self.compositor = AlphaCompositor() # this the key to convergence, this is differentiable @property def verts_data(self): # a heavy copy operation verts = torch.cat([self.verts, self.radius], dim=-1).ravel().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=torch_dtype_to_numpy_dtype(self.dtype), order='C') # this should only be invoked once return verts def use_gl_program(self, program): super().use_gl_program(program) gl.glUseProgram(self.splat_program) # use a different program self.uniforms.read_index = gl.glGetUniformLocation(program, f'read_index') self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower') gl.glUniform1i(self.uniforms.read_index, 0) gl.glUniform1i(self.uniforms.read_lower, 1) def upload_gl_uniforms(self, camera: Camera): super().upload_gl_uniforms(camera) def compile_shaders(self): try: self.splat_program = shaders.compileProgram( shaders.compileShader(load_shader_source('idx_splat.vert'), gl.GL_VERTEX_SHADER), # use the pass through quad shader shaders.compileShader(load_shader_source('idx_splat.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def init_gl_buffers(self, v: int = 0, f: int = 0): if hasattr(self, 'cu_vbo'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo)) super().init_gl_buffers(v, f) # Register vertex buffer obejct flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags)) def init_textures(self): if hasattr(self, 'cu_read_index'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_index)) 
CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_index)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower)) if hasattr(self, 'write_fbo'): gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo]) gl.glDeleteTextures(6, [self.write_index, self.write_lower, self.write_attach, self.read_index, self.read_lower, self.read_attach]) self.write_index, self.write_lower, self.write_attach, self.write_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W) self.read_index, self.read_lower, self.read_attach, self.read_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W) # Register image to read from flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly self.cu_read_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_index, gl.GL_TEXTURE_2D, flags)) self.cu_write_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_index, gl.GL_TEXTURE_2D, flags)) self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags)) self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags)) log(f'Created texture of h, w: {self.max_H}, {self.max_W}') def rasterize_generator(self, camera: Camera = None, length: int = None): # some implementation requires no uploading of camera front_fbo, front_index, front_lower = self.read_fbo, self.read_index, self.read_lower back_fbo, back_index, back_lower = self.write_fbo, self.write_index, self.write_lower # Only clear the output once gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo) # for offscreen rendering to textures gl.glClearBufferiv(gl.GL_COLOR, 0, [-1]) gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0]) gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # Only clear the output once gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures gl.glClearBufferiv(gl.GL_COLOR, 0, [-1]) gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0]) gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # Prepare for the actual rendering, previous operations could rebind the vertex array self.use_gl_program(self.splat_program) self.upload_gl_uniforms(camera) gl.glBindVertexArray(self.vao) # The actual multi pass rendering process happens here for pass_index in range(self.pts_per_pix): # Swap buffers to render the next pass front_fbo, front_index, front_lower, back_fbo, back_index, back_lower = \ back_fbo, back_index, back_lower, front_fbo, front_index, front_lower # Bind the read texture and bind the write render frame buffer gl.glBindTextures(0, 2, [front_index, front_lower]) # Move content from write_fbo to screen fbo if pass_index > self.pts_per_pix * self.blit_last_ratio: # no blitting almost has no effect on the rendering gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo) gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo) gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + 1) gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + 1) gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1]) else: # Only clear the output once gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures # Clear depth buffer for depth testing 
gl.glClearBufferiv(gl.GL_COLOR, 0, [-1]) # clear the indices buffer for later rendering and retrieving gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing # The actual drawing pass with render things out to the write_fbo gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts)) # number of vertices yield back_fbo # give the CUDA end a chance to read from this frame buffer after rendering # Restore states of things gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) gl.glBindVertexArray(0) return def forward(self, xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor, batch: dotdict, return_frags: bool = False, return_full: bool = False, ): """ Get all indices from the depth peeling passes Compute the vertex weight here in torch(cuda) Use the indices to pass through a compositor The backward pass should only be valid on the torch side, and it should've been enough TODO: This function is too memory intensive TODO: Performing IBR is too memory intensive """ # This the slow part, but not differentiable idx, _, _ = self.forward_idx(xyz, rad, batch) # B, H, W, K msk = idx != -1 # B, H, W, K idx = torch.where(msk, idx, 0).long() # Sample things needed for computing screen space weight H, W, K, R, T, C = get_opencv_camera_params(batch) K, R, T, C = K.to(xyz.dtype), R.to(xyz.dtype), T.to(xyz.dtype), C.to(xyz.dtype) pix_xyz = (xyz @ R.mT + T.mT) @ K.mT # B, P, 3 pix_xyz_xy = pix_xyz[..., :-1] / (pix_xyz[..., -1:] + 1e-10) pix_rad = abs(K[..., 1, 1][..., None] * rad[..., 0] / (pix_xyz[..., -1] + 1e-10)) # z: B, 1 * B, N, world space radius
mean_xy = multi_gather(pix_xyz_xy, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape, 2) # B, HWK, 2 -> B, H, W, K, 2
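# --- Added illustration (not part of the original record) ---
# A minimal, self-contained sketch of what the forward() docstring above describes: turning
# the gathered per-pixel point indices from the depth-peeling passes into screen-space
# weights and compositing them front to back in plain PyTorch. Shapes are assumptions of
# this sketch, following the comments above: idx/msk are (B, H, W, K) long/bool,
# pix_xyz_xy is (B, P, 2), pix_rad is (B, P), rgb is (B, P, 3), occ is (B, P, 1).
# The repository itself uses multi_gather and a differentiable compositor instead.
import torch

def composite_fragments(idx, msk, pix_xyz_xy, pix_rad, rgb, occ):
    B, H, W, K = idx.shape
    flat = idx.view(B, -1)                                                     # B, H*W*K
    mean_xy = torch.gather(pix_xyz_xy, 1, flat[..., None].expand(-1, -1, 2)).view(B, H, W, K, 2)
    rad = torch.gather(pix_rad, 1, flat).view(B, H, W, K)
    col = torch.gather(rgb, 1, flat[..., None].expand(-1, -1, 3)).view(B, H, W, K, 3)
    alp = torch.gather(occ[..., 0], 1, flat).view(B, H, W, K)
    # Pixel centers expressed in the same screen space as mean_xy
    y, x = torch.meshgrid(torch.arange(H), torch.arange(W), indexing='ij')
    pix = torch.stack([x, y], dim=-1).to(mean_xy) + 0.5                        # H, W, 2
    d2 = ((pix[None, :, :, None] - mean_xy) ** 2).sum(-1)                      # B, H, W, K
    w = (1 - d2 / (rad ** 2 + 1e-10)).clamp(0, 1) * alp * msk                  # per-fragment opacity
    # Front-to-back alpha compositing over the K peeled layers
    T = torch.cumprod(torch.cat([torch.ones_like(w[..., :1]), 1 - w[..., :-1]], dim=-1), dim=-1)
    rgb_map = (T[..., None] * w[..., None] * col).sum(-2)                      # B, H, W, 3
    acc_map = (T * w).sum(-1, keepdim=True)                                    # B, H, W, 1
    return rgb_map, acc_map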
10
2023-10-17 04:48:46+00:00
16k
0xbitches/sd-webui-lcm
scripts/main.py
[ { "identifier": "LCMScheduler", "path": "lcm/lcm_scheduler.py", "snippet": "class LCMScheduler(SchedulerMixin, ConfigMixin):\n \"\"\"\n `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with\n non-Markovian guidance.\n\n This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic\n methods the library implements for all schedulers such as loading and saving.\n\n Args:\n num_train_timesteps (`int`, defaults to 1000):\n The number of diffusion steps to train the model.\n beta_start (`float`, defaults to 0.0001):\n The starting `beta` value of inference.\n beta_end (`float`, defaults to 0.02):\n The final `beta` value.\n beta_schedule (`str`, defaults to `\"linear\"`):\n The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from\n `linear`, `scaled_linear`, or `squaredcos_cap_v2`.\n trained_betas (`np.ndarray`, *optional*):\n Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.\n clip_sample (`bool`, defaults to `True`):\n Clip the predicted sample for numerical stability.\n clip_sample_range (`float`, defaults to 1.0):\n The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.\n set_alpha_to_one (`bool`, defaults to `True`):\n Each diffusion step uses the alphas product value at that step and at the previous one. For the final step\n there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,\n otherwise it uses the alpha value at step 0.\n steps_offset (`int`, defaults to 0):\n An offset added to the inference steps. You can use a combination of `offset=1` and\n `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable\n Diffusion.\n prediction_type (`str`, defaults to `epsilon`, *optional*):\n Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),\n `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen\n Video](https://imagen.research.google/video/paper.pdf) paper).\n thresholding (`bool`, defaults to `False`):\n Whether to use the \"dynamic thresholding\" method. This is unsuitable for latent-space diffusion models such\n as Stable Diffusion.\n dynamic_thresholding_ratio (`float`, defaults to 0.995):\n The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.\n sample_max_value (`float`, defaults to 1.0):\n The threshold value for dynamic thresholding. Valid only when `thresholding=True`.\n timestep_spacing (`str`, defaults to `\"leading\"`):\n The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and\n Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.\n rescale_betas_zero_snr (`bool`, defaults to `False`):\n Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and\n dark samples instead of limiting it to samples with medium brightness. 
Loosely related to\n [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).\n \"\"\"\n\n # _compatibles = [e.name for e in KarrasDiffusionSchedulers]\n order = 1\n\n @register_to_config\n def __init__(\n self,\n num_train_timesteps: int = 1000,\n beta_start: float = 0.0001,\n beta_end: float = 0.02,\n beta_schedule: str = \"linear\",\n trained_betas: Optional[Union[np.ndarray, List[float]]] = None,\n clip_sample: bool = True,\n set_alpha_to_one: bool = True,\n steps_offset: int = 0,\n prediction_type: str = \"epsilon\",\n thresholding: bool = False,\n dynamic_thresholding_ratio: float = 0.995,\n clip_sample_range: float = 1.0,\n sample_max_value: float = 1.0,\n timestep_spacing: str = \"leading\",\n rescale_betas_zero_snr: bool = False,\n ):\n if trained_betas is not None:\n self.betas = torch.tensor(trained_betas, dtype=torch.float32)\n elif beta_schedule == \"linear\":\n self.betas = torch.linspace(\n beta_start, beta_end, num_train_timesteps, dtype=torch.float32)\n elif beta_schedule == \"scaled_linear\":\n # this schedule is very specific to the latent diffusion model.\n self.betas = (\n torch.linspace(beta_start**0.5, beta_end**0.5,\n num_train_timesteps, dtype=torch.float32) ** 2\n )\n elif beta_schedule == \"squaredcos_cap_v2\":\n # Glide cosine schedule\n self.betas = betas_for_alpha_bar(num_train_timesteps)\n else:\n raise NotImplementedError(\n f\"{beta_schedule} does is not implemented for {self.__class__}\")\n\n # Rescale for zero SNR\n if rescale_betas_zero_snr:\n self.betas = rescale_zero_terminal_snr(self.betas)\n\n self.alphas = 1.0 - self.betas\n self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)\n\n # At every step in ddim, we are looking into the previous alphas_cumprod\n # For the final step, there is no previous alphas_cumprod because we are already at 0\n # `set_alpha_to_one` decides whether we set this parameter simply to one or\n # whether we use the final alpha of the \"non-previous\" one.\n self.final_alpha_cumprod = torch.tensor(\n 1.0) if set_alpha_to_one else self.alphas_cumprod[0]\n\n # standard deviation of the initial noise distribution\n self.init_noise_sigma = 1.0\n\n # setable values\n self.num_inference_steps = None\n self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[\n ::-1].copy().astype(np.int64))\n\n def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:\n \"\"\"\n Ensures interchangeability with schedulers that need to scale the denoising model input depending on the\n current timestep.\n\n Args:\n sample (`torch.FloatTensor`):\n The input sample.\n timestep (`int`, *optional*):\n The current timestep in the diffusion chain.\n\n Returns:\n `torch.FloatTensor`:\n A scaled input sample.\n \"\"\"\n return sample\n\n def _get_variance(self, timestep, prev_timestep):\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n variance = (beta_prod_t_prev / beta_prod_t) * \\\n (1 - alpha_prod_t / alpha_prod_t_prev)\n\n return variance\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample\n def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n \"Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in 
xt0 (the\n prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by\n s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing\n pixels from saturation at each step. We find that dynamic thresholding results in significantly better\n photorealism as well as better image-text alignment, especially when using very large guidance weights.\"\n\n https://arxiv.org/abs/2205.11487\n \"\"\"\n dtype = sample.dtype\n batch_size, channels, height, width = sample.shape\n\n if dtype not in (torch.float32, torch.float64):\n # upcast for quantile calculation, and clamp not implemented for cpu half\n sample = sample.float()\n\n # Flatten sample for doing quantile calculation along each image\n sample = sample.reshape(batch_size, channels * height * width)\n\n abs_sample = sample.abs() # \"a certain percentile absolute pixel value\"\n\n s = torch.quantile(\n abs_sample, self.config.dynamic_thresholding_ratio, dim=1)\n s = torch.clamp(\n s, min=1, max=self.config.sample_max_value\n ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]\n\n # (batch_size, 1) because clamp will broadcast along dim=0\n s = s.unsqueeze(1)\n # \"we threshold xt0 to the range [-s, s] and then divide by s\"\n sample = torch.clamp(sample, -s, s) / s\n\n sample = sample.reshape(batch_size, channels, height, width)\n sample = sample.to(dtype)\n\n return sample\n\n def set_timesteps(self, num_inference_steps: int, original_inference_steps: int, device: Union[str, torch.device] = None):\n \"\"\"\n Sets the discrete timesteps used for the diffusion chain (to be run before inference).\n\n Args:\n num_inference_steps (`int`):\n The number of diffusion steps used when generating samples with a pre-trained model.\n \"\"\"\n\n if num_inference_steps > self.config.num_train_timesteps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:\"\n f\" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.config.num_train_timesteps} timesteps.\"\n )\n\n self.num_inference_steps = num_inference_steps\n\n # LCM Timesteps Setting: # Linear Spacing\n c = self.config.num_train_timesteps // original_inference_steps\n lcm_origin_timesteps = np.asarray(\n list(range(1, original_inference_steps + 1))) * c - 1 # LCM Training Steps Schedule\n skipping_step = len(lcm_origin_timesteps) // num_inference_steps\n # LCM Inference Steps Schedule\n timesteps = lcm_origin_timesteps[::-\n skipping_step][:num_inference_steps]\n\n self.timesteps = torch.from_numpy(timesteps.copy()).to(device)\n\n def get_scalings_for_boundary_condition_discrete(self, t):\n self.sigma_data = 0.5 # Default: 0.5\n\n # By dividing 0.1: This is almost a delta function at t=0.\n c_skip = self.sigma_data**2 / (\n (t / 0.1) ** 2 + self.sigma_data**2\n )\n c_out = ((t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5)\n return c_skip, c_out\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timeindex: int,\n timestep: int,\n sample: torch.FloatTensor,\n eta: float = 0.0,\n use_clipped_model_output: bool = False,\n generator=None,\n variance_noise: Optional[torch.FloatTensor] = None,\n return_dict: bool = True,\n ) -> Union[LCMSchedulerOutput, Tuple]:\n \"\"\"\n Predict the sample from the previous timestep by reversing the SDE. 
This function propagates the diffusion\n process from the learned model outputs (most often the predicted noise).\n\n Args:\n model_output (`torch.FloatTensor`):\n The direct output from learned diffusion model.\n timestep (`float`):\n The current discrete timestep in the diffusion chain.\n sample (`torch.FloatTensor`):\n A current instance of a sample created by the diffusion process.\n eta (`float`):\n The weight of noise for added noise in diffusion step.\n use_clipped_model_output (`bool`, defaults to `False`):\n If `True`, computes \"corrected\" `model_output` from the clipped predicted original sample. Necessary\n because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no\n clipping has happened, \"corrected\" `model_output` would coincide with the one provided as input and\n `use_clipped_model_output` has no effect.\n generator (`torch.Generator`, *optional*):\n A random number generator.\n variance_noise (`torch.FloatTensor`):\n Alternative to generating noise with `generator` by directly providing the noise for the variance\n itself. Useful for methods such as [`CycleDiffusion`].\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.\n\n Returns:\n [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:\n If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a\n tuple is returned where the first element is the sample tensor.\n\n \"\"\"\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\"\n )\n\n # 1. get previous step value\n prev_timeindex = timeindex + 1\n if prev_timeindex < len(self.timesteps):\n prev_timestep = self.timesteps[prev_timeindex]\n else:\n prev_timestep = timestep\n\n # 2. compute alphas, betas\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n # 3. Get scalings for boundary conditions\n c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(\n timestep)\n\n # 4. Different Parameterization:\n parameterization = self.config.prediction_type\n\n if parameterization == \"epsilon\": # noise-prediction\n pred_x0 = (sample - beta_prod_t.sqrt() *\n model_output) / alpha_prod_t.sqrt()\n\n elif parameterization == \"sample\": # x-prediction\n pred_x0 = model_output\n\n elif parameterization == \"v_prediction\": # v-prediction\n pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output\n\n # 4. Denoise model output using boundary conditions\n denoised = c_out * pred_x0 + c_skip * sample\n\n # 5. 
Sample z ~ N(0, I), For MultiStep Inference\n # Noise is not used for one-step sampling.\n if len(self.timesteps) > 1:\n noise = torch.randn(model_output.shape).to(model_output.device)\n prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise\n else:\n prev_sample = denoised\n\n if not return_dict:\n return (prev_sample, denoised)\n\n return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise\n\n def add_noise(\n self,\n original_samples: torch.FloatTensor,\n noise: torch.FloatTensor,\n timesteps: torch.IntTensor,\n ) -> torch.FloatTensor:\n # Make sure alphas_cumprod and timestep have same device and dtype as original_samples\n alphas_cumprod = self.alphas_cumprod.to(\n device=original_samples.device, dtype=original_samples.dtype)\n timesteps = timesteps.to(original_samples.device)\n\n sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5\n sqrt_alpha_prod = sqrt_alpha_prod.flatten()\n while len(sqrt_alpha_prod.shape) < len(original_samples.shape):\n sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)\n\n sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()\n while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)\n\n noisy_samples = sqrt_alpha_prod * original_samples + \\\n sqrt_one_minus_alpha_prod * noise\n return noisy_samples\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity\n def get_velocity(\n self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor\n ) -> torch.FloatTensor:\n # Make sure alphas_cumprod and timestep have same device and dtype as sample\n alphas_cumprod = self.alphas_cumprod.to(\n device=sample.device, dtype=sample.dtype)\n timesteps = timesteps.to(sample.device)\n\n sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5\n sqrt_alpha_prod = sqrt_alpha_prod.flatten()\n while len(sqrt_alpha_prod.shape) < len(sample.shape):\n sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)\n\n sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()\n while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)\n\n velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample\n return velocity\n\n def __len__(self):\n return self.config.num_train_timesteps" }, { "identifier": "LatentConsistencyModelPipeline", "path": "lcm/lcm_pipeline.py", "snippet": "class LatentConsistencyModelPipeline(DiffusionPipeline):\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: None,\n safety_checker: None,\n feature_extractor: CLIPImageProcessor\n ):\n super().__init__()\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self.vae_scale_factor = 2 ** (\n len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(\n vae_scale_factor=self.vae_scale_factor)\n\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n prompt_embeds: None,\n ):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n\n Args:\n prompt (`str` 
or `List[str]`, *optional*):\n prompt to be encoded\n device: (`torch.device`):\n torch device\n num_images_per_prompt (`int`):\n number of images that should be generated per prompt\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n \"\"\"\n\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n if prompt_embeds is None:\n\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(\n prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1: -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n prompt_embeds = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n prompt_embeds = prompt_embeds[0]\n\n if self.text_encoder is not None:\n prompt_embeds_dtype = self.text_encoder.dtype\n elif self.unet is not None:\n prompt_embeds_dtype = self.unet.dtype\n else:\n prompt_embeds_dtype = prompt_embeds.dtype\n\n prompt_embeds = prompt_embeds.to(\n dtype=prompt_embeds_dtype, device=device)\n\n bs_embed, seq_len, _ = prompt_embeds.shape\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(\n bs_embed * num_images_per_prompt, seq_len, -1)\n\n # Don't need to get uncond prompt embedding because of LCM Guided Distillation\n return prompt_embeds\n\n # ¯\\_(ツ)_/¯\n def run_safety_checker(self, image, device, dtype):\n return image, None\n \n def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents=None):\n shape = (batch_size, num_channels_latents, height //\n self.vae_scale_factor, width // self.vae_scale_factor)\n if latents is None:\n latents = torch.randn(shape, dtype=dtype).to(device)\n else:\n latents = latents.to(device)\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):\n \"\"\"\n see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298\n Args:\n timesteps: torch.Tensor: generate embedding vectors at these timesteps\n embedding_dim: int: dimension of the embeddings to generate\n dtype: data type of the generated embeddings\n\n Returns:\n embedding vectors with shape `(len(timesteps), embedding_dim)`\n \"\"\"\n assert len(w.shape) == 1\n w = w * 1000.\n\n half_dim = embedding_dim // 2\n emb = 
torch.log(torch.tensor(10000.)) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)\n emb = w.to(dtype)[:, None] * emb[None, :]\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)\n if embedding_dim % 2 == 1: # zero pad\n emb = torch.nn.functional.pad(emb, (0, 1))\n assert emb.shape == (w.shape[0], embedding_dim)\n return emb\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n height: Optional[int] = 768,\n width: Optional[int] = 768,\n guidance_scale: float = 7.5,\n num_images_per_prompt: Optional[int] = 1,\n latents: Optional[torch.FloatTensor] = None,\n num_inference_steps: int = 4,\n original_inference_steps: int = 50,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n device: Optional[Union[str, torch.device]] = None,\n ):\n\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)\n\n # 3. Encode input prompt\n prompt_embeds = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n prompt_embeds=prompt_embeds,\n )\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, original_inference_steps)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variable\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n latents,\n )\n bs = batch_size * num_images_per_prompt\n\n # 6. Get Guidance Scale Embedding\n w = torch.tensor(guidance_scale).repeat(bs)\n w_embedding = self.get_w_embedding(w, embedding_dim=256).to(\n device=device, dtype=latents.dtype)\n\n # 7. 
LCM MultiStep Sampling Loop:\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n\n ts = torch.full((bs,), t, device=device, dtype=torch.long)\n latents = latents.to(prompt_embeds.dtype)\n\n # model prediction (v-prediction, eps, x)\n model_pred = self.unet(\n latents,\n ts,\n timestep_cond=w_embedding,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False)[0]\n\n # compute the previous noisy sample x_t -> x_t-1\n latents, denoised = self.scheduler.step(\n model_pred, i, t, latents, return_dict=False)\n\n # # call the callback, if provided\n # if i == len(timesteps) - 1:\n progress_bar.update()\n\n denoised = denoised.to(prompt_embeds.dtype)\n if not output_type == \"latent\":\n image = self.vae.decode(\n denoised / self.vae.config.scaling_factor, return_dict=False)[0]\n image, has_nsfw_concept = self.run_safety_checker(\n image, device, prompt_embeds.dtype)\n else:\n image = denoised\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(\n image, output_type=output_type, do_denormalize=do_denormalize)\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" }, { "identifier": "LatentConsistencyModelImg2ImgPipeline", "path": "lcm/lcm_i2i_pipeline.py", "snippet": "class LatentConsistencyModelImg2ImgPipeline(DiffusionPipeline):\n _optional_components = [\"scheduler\"]\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: \"LCMSchedulerWithTimestamp\",\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n requires_safety_checker: bool = False,\n ):\n super().__init__()\n\n scheduler = (\n scheduler\n if scheduler is not None\n else LCMSchedulerWithTimestamp(\n beta_start=0.00085, beta_end=0.0120, beta_schedule=\"scaled_linear\", prediction_type=\"epsilon\"\n )\n )\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)\n\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n prompt_embeds: None,\n ):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n Args:\n prompt (`str` or `List[str]`, *optional*):\n prompt to be encoded\n device: (`torch.device`):\n torch device\n num_images_per_prompt (`int`):\n number of images that should be generated per prompt\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not\n provided, text embeddings will be generated from `prompt` input argument.\n \"\"\"\n\n if prompt is not None and isinstance(prompt, str):\n pass\n elif prompt is not None and isinstance(prompt, list):\n len(prompt)\n else:\n prompt_embeds.shape[0]\n\n if prompt_embeds is None:\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n prompt_embeds = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n prompt_embeds = prompt_embeds[0]\n\n if self.text_encoder is not None:\n prompt_embeds_dtype = self.text_encoder.dtype\n elif self.unet is not None:\n prompt_embeds_dtype = self.unet.dtype\n else:\n prompt_embeds_dtype = prompt_embeds.dtype\n\n prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)\n\n bs_embed, seq_len, _ = prompt_embeds.shape\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # Don't need to get uncond prompt embedding because of LCM Guided Distillation\n return prompt_embeds\n\n # ¯\\_(ツ)_/¯\n def run_safety_checker(self, image, device, dtype):\n return image, None\n\n def prepare_latents(self, image, timestep, batch_size, num_channels_latents, height, width, dtype, device, latents=None, generator=None):\n shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)\n\n if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):\n raise ValueError(\n f\"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}\"\n )\n\n image = image.to(device=device, dtype=dtype)\n\n # batch_size = batch_size * num_images_per_prompt\n\n if image.shape[1] == 4:\n init_latents = image\n\n else:\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. 
Make sure the batch size matches the length of the generators.\"\n )\n\n elif isinstance(generator, list):\n init_latents = [\n self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)\n ]\n init_latents = torch.cat(init_latents, dim=0)\n else:\n init_latents = self.vae.encode(image).latent_dist.sample(generator)\n\n init_latents = self.vae.config.scaling_factor * init_latents\n\n if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:\n # expand init_latents for batch_size\n deprecation_message = (\n f\"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial\"\n \" images (`image`). Initial images are now duplicating to match the number of text prompts. Note\"\n \" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update\"\n \" your script to pass as many initial images as text prompts to suppress this warning.\"\n )\n # deprecate(\"len(prompt) != len(image)\", \"1.0.0\", deprecation_message, standard_warn=False)\n additional_image_per_prompt = batch_size // init_latents.shape[0]\n init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)\n elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:\n raise ValueError(\n f\"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.\"\n )\n else:\n init_latents = torch.cat([init_latents], dim=0)\n\n shape = init_latents.shape\n noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n\n # get latents\n init_latents = self.scheduler.add_noise(init_latents, noise, timestep)\n latents = init_latents\n\n return latents\n\n if latents is None:\n latents = torch.randn(shape, dtype=dtype).to(device)\n else:\n latents = latents.to(device)\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):\n \"\"\"\n see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298\n Args:\n timesteps: torch.Tensor: generate embedding vectors at these timesteps\n embedding_dim: int: dimension of the embeddings to generate\n dtype: data type of the generated embeddings\n Returns:\n embedding vectors with shape `(len(timesteps), embedding_dim)`\n \"\"\"\n assert len(w.shape) == 1\n w = w * 1000.0\n\n half_dim = embedding_dim // 2\n emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)\n emb = w.to(dtype)[:, None] * emb[None, :]\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)\n if embedding_dim % 2 == 1: # zero pad\n emb = torch.nn.functional.pad(emb, (0, 1))\n assert emb.shape == (w.shape[0], embedding_dim)\n return emb\n\n def get_timesteps(self, num_inference_steps, strength, device):\n # get the original timestep using init_timestep\n init_timestep = min(int(num_inference_steps * strength), num_inference_steps)\n\n t_start = max(num_inference_steps - init_timestep, 0)\n timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]\n\n return timesteps, num_inference_steps - t_start\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: PipelineImageInput = None,\n strength: float = 0.8,\n height: Optional[int] = 768,\n width: Optional[int] = 768,\n guidance_scale: float = 7.5,\n 
num_images_per_prompt: Optional[int] = 1,\n latents: Optional[torch.FloatTensor] = None,\n num_inference_steps: int = 4,\n original_inference_steps: int = 50,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n device: Optional[Union[str, torch.device]] = None,\n ):\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = device\n # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)\n\n # 3. Encode input prompt\n prompt_embeds = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n prompt_embeds=prompt_embeds,\n )\n\n # 3.5 encode image\n image = self.image_processor.preprocess(image=image)\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(strength, num_inference_steps, original_inference_steps)\n # timesteps = self.scheduler.timesteps\n # timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, 1.0, device)\n timesteps = self.scheduler.timesteps\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n\n # 5. Prepare latent variable\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n image,\n latent_timestep,\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n latents,\n )\n bs = batch_size * num_images_per_prompt\n\n # 6. Get Guidance Scale Embedding\n w = torch.tensor(guidance_scale).repeat(bs)\n w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)\n\n # 7. LCM MultiStep Sampling Loop:\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n ts = torch.full((bs,), t, device=device, dtype=torch.long)\n latents = latents.to(prompt_embeds.dtype)\n\n # model prediction (v-prediction, eps, x)\n model_pred = self.unet(\n latents,\n ts,\n timestep_cond=w_embedding,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False,\n )[0]\n\n # compute the previous noisy sample x_t -> x_t-1\n latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)\n\n # # call the callback, if provided\n # if i == len(timesteps) - 1:\n progress_bar.update()\n\n denoised = denoised.to(prompt_embeds.dtype)\n if not output_type == \"latent\":\n image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = denoised\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" } ]
from concurrent.futures import ThreadPoolExecutor from pathlib import Path from typing import Optional from lcm.lcm_scheduler import LCMScheduler from lcm.lcm_pipeline import LatentConsistencyModelPipeline from lcm.lcm_i2i_pipeline import LatentConsistencyModelImg2ImgPipeline from diffusers.image_processor import PipelineImageInput from modules import script_callbacks from PIL import Image, PngImagePlugin import uuid import modules.scripts as scripts import modules.shared import os import random import time import numpy as np import gradio as gr import torch import cv2
11,633
return scripts.AlwaysVisible def ui(self, is_img2img): return () def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: if randomize_seed: seed = random.randint(0, MAX_SEED) return seed def save_image(img, metadata: dict): save_dir = os.path.join(scripts.basedir(), "outputs/txt2img-images/LCM/") Path(save_dir).mkdir(exist_ok=True, parents=True) seed = metadata["seed"] unique_id = uuid.uuid4() filename = save_dir + f"{unique_id}-{seed}" + ".png" meta_tuples = [(k, str(v)) for k, v in metadata.items()] png_info = PngImagePlugin.PngInfo() for k, v in meta_tuples: png_info.add_text(k, v) img.save(filename, pnginfo=png_info) return filename def save_images(image_array, metadata: dict): paths = [] with ThreadPoolExecutor() as executor: paths = list(executor.map(save_image, image_array, [metadata]*len(image_array))) return paths def generate( prompt: str, seed: int = 0, width: int = 512, height: int = 512, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True) ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32") scheduler = LCMScheduler.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", subfolder="scheduler") pipe = LatentConsistencyModelPipeline.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", scheduler = scheduler, safety_checker = None) if use_fp16: pipe.to(torch_device=selected_device, torch_dtype=torch.float16) else: pipe.to(torch_device=selected_device, torch_dtype=torch.float32) # Windows does not support torch.compile for now if os.name != 'nt' and use_torch_compile: pipe.unet = torch.compile(pipe.unet, mode='max-autotune') start_time = time.time() result = pipe( prompt=prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, num_images_per_prompt=num_images, original_inference_steps=50, output_type="pil", device = selected_device ).images paths = save_images(result, metadata={"prompt": prompt, "seed": seed, "width": width, "height": height, "guidance_scale": guidance_scale, "num_inference_steps": num_inference_steps}) elapsed_time = time.time() - start_time print("LCM inference time: ", elapsed_time, "seconds") return paths, seed def generate_i2i( prompt: str, image: PipelineImageInput = None, strength: float = 0.8, seed: int = 0, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True), width: Optional[int] = 512, height: Optional[int] = 512, ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32")
DESCRIPTION = '''# Latent Consistency Model Running [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) | [Project Page](https://latent-consistency-models.github.io) | [Extension Page](https://github.com/0xbitches/sd-webui-lcm) ''' MAX_SEED = np.iinfo(np.int32).max MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "768")) class Script(scripts.Script): def __init__(self) -> None: super().__init__() def title(self): return "LCM" def show(self, is_img2img): return scripts.AlwaysVisible def ui(self, is_img2img): return () def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: if randomize_seed: seed = random.randint(0, MAX_SEED) return seed def save_image(img, metadata: dict): save_dir = os.path.join(scripts.basedir(), "outputs/txt2img-images/LCM/") Path(save_dir).mkdir(exist_ok=True, parents=True) seed = metadata["seed"] unique_id = uuid.uuid4() filename = save_dir + f"{unique_id}-{seed}" + ".png" meta_tuples = [(k, str(v)) for k, v in metadata.items()] png_info = PngImagePlugin.PngInfo() for k, v in meta_tuples: png_info.add_text(k, v) img.save(filename, pnginfo=png_info) return filename def save_images(image_array, metadata: dict): paths = [] with ThreadPoolExecutor() as executor: paths = list(executor.map(save_image, image_array, [metadata]*len(image_array))) return paths def generate( prompt: str, seed: int = 0, width: int = 512, height: int = 512, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True) ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32") scheduler = LCMScheduler.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", subfolder="scheduler") pipe = LatentConsistencyModelPipeline.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", scheduler = scheduler, safety_checker = None) if use_fp16: pipe.to(torch_device=selected_device, torch_dtype=torch.float16) else: pipe.to(torch_device=selected_device, torch_dtype=torch.float32) # Windows does not support torch.compile for now if os.name != 'nt' and use_torch_compile: pipe.unet = torch.compile(pipe.unet, mode='max-autotune') start_time = time.time() result = pipe( prompt=prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, num_images_per_prompt=num_images, original_inference_steps=50, output_type="pil", device = selected_device ).images paths = save_images(result, metadata={"prompt": prompt, "seed": seed, "width": width, "height": height, "guidance_scale": guidance_scale, "num_inference_steps": num_inference_steps}) elapsed_time = time.time() - start_time print("LCM inference time: ", elapsed_time, "seconds") return paths, seed def generate_i2i( prompt: str, image: PipelineImageInput = None, strength: float = 0.8, seed: int = 0, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True), width: Optional[int] = 512, height: Optional[int] = 512, ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: 
use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32")
pipe = LatentConsistencyModelImg2ImgPipeline.from_pretrained(
2
2023-10-22 11:53:48+00:00
16k
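The record above targets the img2img path: its next_line is the `LatentConsistencyModelImg2ImgPipeline.from_pretrained(` call and its gold snippet (index 2) is the `LatentConsistencyModelImg2ImgPipeline` class. Below is a minimal sketch of how `generate_i2i` plausibly continues past that recorded line, reusing the names already defined in the record's all_code (`selected_device`, `use_fp16`, `prompt`, `image`, `strength`, `num_images`); only the `from_pretrained(` call itself is given by the record — the scheduler reuse, dtype handling, and call arguments are assumptions mirroring the txt2img branch.

# Hedged sketch: plausible continuation of generate_i2i() after the recorded
# next_line. Only the from_pretrained( call is part of the record; the rest
# mirrors the txt2img branch shown in all_code and is assumed.
scheduler = LCMScheduler.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7", subfolder="scheduler")
pipe = LatentConsistencyModelImg2ImgPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7", scheduler=scheduler, safety_checker=None)

# Same device/dtype handling as the txt2img branch.
if use_fp16:
    pipe.to(torch_device=selected_device, torch_dtype=torch.float16)
else:
    pipe.to(torch_device=selected_device, torch_dtype=torch.float32)

result = pipe(
    prompt=prompt,
    image=image,                      # PipelineImageInput from the UI
    strength=strength,
    guidance_scale=guidance_scale,
    num_inference_steps=num_inference_steps,
    num_images_per_prompt=num_images,
    original_inference_steps=50,
    output_type="pil",
    device=selected_device,
).images

The keyword arguments above match the `__call__` signature of `LatentConsistencyModelImg2ImgPipeline` shown in the record's context snippet; any deviation in the actual file beyond the recorded next_line is not captured here.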
kylesargent/ZeroNVS
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n # import pdb\n # pdb.set_trace()\n\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x = x.clone()\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == 
\"mt\"\n ), \"isosurface_deformable_grid only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n 
get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n assert self.unbounded\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n 
density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio
13,662
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config )
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config )
self.feature_network = get_mlp(
8
2023-10-24 19:02:44+00:00
16k
princeton-nlp/LLM-Shearing
llmshearing/models/composer_pythia.py
[ { "identifier": "L0Module", "path": "llmshearing/models/l0_module.py", "snippet": "class L0Module(nn.Module):\n def __init__(self, cfg, device):\n super(L0Module, self).__init__()\n\n # base and target model info\n n_matrix_mlp = 2 if \"pythia\" in cfg.name else 3\n self.base_model_info = self.set_model_info(cfg, n_matrix_mlp=n_matrix_mlp) \n l0_module_cfg = cfg.l0_module\n self.target_model_info = None\n target_model_cfg = getattr(l0_module_cfg, \"target_model\", None)\n if target_model_cfg is not None:\n self.target_model_info = self.set_model_info(target_model_cfg, n_matrix_mlp=n_matrix_mlp)\n \n # l0 config\n self.pruning_modules = l0_module_cfg.pruning_modules \n self.start_sparsity = l0_module_cfg.start_sparsity \n self.lagrangian_warmup_steps = Time.from_timestring(l0_module_cfg.lagrangian_warmup_steps).value\n self.device = device\n self.eval_target_model = l0_module_cfg.get(\"eval_target_model\", True)\n \n # l0 params\n self.lambdas = {}\n self.lambdas[\"lambda_1\"] = torch.nn.Parameter(torch.tensor(0.0, device=device))\n self.lambdas[\"lambda_2\"] = torch.nn.Parameter(torch.tensor(0.0, device=device))\n self.masks = {}\n for pruning_module in self.pruning_modules:\n self.initialize_one_module(pruning_module)\n self.masks = torch.nn.ModuleDict(self.masks)\n self.lambdas = torch.nn.ParameterDict(self.lambdas)\n \n # config after initialization\n self.prunable_model_size = self.calculate_prunable_model_size(self.base_model_info)\n if target_model_cfg is not None:\n self.prunable_target_model_size = self.calculate_prunable_model_size(self.target_model_info)\n self.target_sparsity = 1 - self.prunable_target_model_size / self.prunable_model_size\n else:\n self.target_sparsity = l0_module_cfg.target_sparsity\n\n print(\"********** Initializing L0 Module **********\") \n for pruning_module in self.pruning_modules:\n print(f\"***** {pruning_module} *****\")\n print(f\"z.shape\", self.masks[pruning_module].z_loga.shape)\n print(f\"size\", self.masks[pruning_module].mask_size)\n print(f\"prunable model size: {self.prunable_model_size}\")\n \n \n def set_model_info(self, cfg, n_matrix_mlp):\n ns = NS() \n ns.hidden_size = cfg.d_model\n ns.intermediate_size = cfg.intermediate_size\n ns.num_attention_heads = cfg.n_heads\n ns.mlp_num_per_layer = 1\n ns.dim_per_head = ns.hidden_size // ns.num_attention_heads \n ns.num_layers = cfg.n_layers\n ns.vocab_size = cfg.vocab_size\n\n ns.params_per_head_layer = ns.hidden_size * ns.hidden_size * 4\n ns.params_per_head = ns.params_per_head_layer // ns.num_attention_heads\n ns.params_per_mlp_layer = ns.hidden_size * ns.intermediate_size * n_matrix_mlp\n ns.params_per_intermediate_dim = ns.params_per_mlp_layer // ns.intermediate_size\n\n ns.full_model_size = (ns.params_per_head_layer + ns.params_per_mlp_layer) * ns.num_layers\n return ns\n \n def calculate_prunable_model_size(self, ns: NS):\n prunable_mlp_size = ns.params_per_mlp_layer * ns.num_layers\n prunable_head_layer_size = ns.params_per_head_layer * ns.num_layers\n prunable_model_size = 0\n if \"hidden\" in self.pruning_modules:\n return prunable_mlp_size + prunable_head_layer_size\n if \"head_layer\" in self.pruning_modules or \"head\" in self.pruning_modules:\n prunable_model_size += prunable_head_layer_size\n if \"mlp\" in self.pruning_modules or \"intermediate\" in self.pruning_modules:\n prunable_model_size += prunable_mlp_size\n return prunable_model_size\n \n def initialize_one_module(self, module_name: str):\n func_name = f\"initialize_{module_name}\"\n try:\n method = getattr(self, 
func_name)\n except AttributeError:\n raise NotImplementedError(\"Instance `{}` does not implement `{}`\".format(self, func_name))\n method()\n \n def initialize_hidden(self):\n mask_shape = [self.base_model_info.hidden_size]\n num_params_per_mask=self.base_model_info.hidden_size * 4 + self.base_model_info.hidden_size * 4 * 2\n \n target_hidden_sparsity = None; pd=None; target_mask_size=None; \n if self.target_model_info is not None:\n target_hidden_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n target_mask_size = self.target_model_info.hidden_size\n pd = {\"lambda_1_hidden\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_hidden\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n hidden_mask = Mask(name=\"hidden\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=[self.base_model_info.hidden_size],\n target_sparsity=target_hidden_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"hidden\"] = hidden_mask\n\n def initialize_head(self):\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads]\n num_params_per_mask = self.base_model_info.params_per_head\n mask_output_shape = [self.base_model_info.num_layers, 1, self.base_model_info.num_attention_heads, 1] \n \n target_head_sparsity = None; pd = {} ; target_mask_size=None; \n if self.target_model_info is not None:\n target_head_sparsity = 1 - self.target_model_info.num_attention_heads / self.base_model_info.num_attention_heads\n target_mask_size = self.target_model_info.num_attention_heads\n pd = {\"lambda_1_head\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_head\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n head_mask = Mask(name=\"head\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_head_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"head\"] = head_mask \n\n def initialize_qk_head_dim(self): # only campatible when target model info is available\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads, self.base_model_info.dim_per_head]\n num_params_per_mask = 2 * self.base_model_info.hidden_size\n mask_output_shape = [self.base_model_info.num_layers, self.base_model_info.hidden_size] \n \n target_qk_head_dim_sparsity = None; pd = {} \n if self.target_model_info is not None:\n target_qk_head_dim_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n pd = {\"lambda_1_qk_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_qk_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n qk_head_dim = Mask(name=\"qk_head_dim\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_qk_head_dim_sparsity,\n target_mask_size=self.target_model_info.hidden_size,\n device=self.device)\n self.masks[\"qk_head_dim\"] = qk_head_dim \n \n \n def initialize_vo_head_dim(self): # only campatible when target model info is available\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads, self.base_model_info.dim_per_head]\n 
num_params_per_mask = 2 * self.base_model_info.hidden_size\n mask_output_shape = [self.base_model_info.num_layers, self.base_model_info.hidden_size] \n \n target_vo_head_dim_sparsity = None; pd = {} \n if self.target_model_info is not None:\n target_vo_head_dim_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n pd = {\"lambda_1_vo_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_vo_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n vo_head_dim = Mask(name=\"vo_head_dim\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_vo_head_dim_sparsity,\n device=self.device)\n self.masks[\"vo_head_dim\"] = vo_head_dim \n \n def initialize_head_layer(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_head * self.base_model_info.num_attention_heads\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_head_layer_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_head_layer_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_head_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_head_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n head_layer_mask = Mask(name=\"head_layer\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_head_layer_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"head_layer\"] = head_layer_mask\n \n def initialize_intermediate(self):\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.intermediate_size]\n num_params_per_mask=self.base_model_info.params_per_intermediate_dim\n mask_output_shape = [self.base_model_info.num_layers, 1, 1, self.base_model_info.intermediate_size] \n \n target_int_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_int_sparsity = 1 - self.target_model_info.intermediate_size / self.base_model_info.intermediate_size\n target_mask_size = self.target_model_info.intermediate_size\n pd = {\"lambda_1_intermediate\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_intermediate\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n int_mask = Mask(name=\"intermediate\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_int_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"intermediate\"] = int_mask\n \n\n def initialize_mlp(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_mlp_layer\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_mlp_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_mlp_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_mlp\": torch.nn.Parameter(torch.tensor(0.0, 
device=self.device)),\n \"lambda_2_mlp\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n mlp_mask = Mask(name=\"mlp\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_mlp_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"mlp\"] = mlp_mask \n\n def initialize_layer(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_head * self.base_model_info.num_attention_heads + self.base_model_info.params_per_mlp_layer\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_layer_sparsity = None; target_mask_size=None; pd = {}\n if self.target_model_info is not None:\n target_layer_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n layer_mask = Mask(name=\"layer\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_layer_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model) \n self.masks[\"layer\"] = layer_mask \n \n def constrain_parameters(self):\n for key in self.masks:\n self.masks[key].constrain_parameters()\n\n def calculate_expected_score_sparsity(self):\n expected_scores = {}\n expected_sparsitys = {}\n for key in self.masks:\n score, sparsity = self.masks[key].calculate_expected_score_sparsity()\n expected_scores[key] = score\n expected_sparsitys[key] = sparsity\n return expected_scores, expected_sparsitys\n \n def transform_scores_for_head(self, expected_scores: dict):\n head_score = expected_scores[\"head\"] # 12 * 12\n\n head_layer_score = None\n if \"head_layer\" in expected_scores:\n head_layer_score = expected_scores[\"head_layer\"]\n elif \"layer\" in expected_scores:\n head_layer_score = expected_scores[\"layer\"] # 12\n if head_layer_score is not None:\n head_layer_score = head_layer_score.view(-1, 1) # 12 * 1\n \n return head_layer_score, head_score\n\n def transform_scores_for_mlp(self, expected_scores: dict):\n mlp_score = None\n if \"mlp\" in expected_scores:\n mlp_score = expected_scores[\"mlp\"] # 12\n elif \"layer\" in expected_scores:\n mlp_score = expected_scores[\"layer\"] # 12\n if mlp_score is not None:\n mlp_score = mlp_score.unsqueeze(-1)\n \n intermediate_score = expected_scores[\"intermediate\"] # 12 * 3072\n return mlp_score, intermediate_score\n\n\n def get_expected_num_params(self, expected_scores: dict): #! 
calculate the current parsity\n num_parameters = 0\n \n # 12 * 1 \n # 12 * 12\n head_layer_score, head_score = self.transform_scores_for_head(expected_scores)\n mlp_score, int_score = self.transform_scores_for_mlp(expected_scores)\n \n head_score = (head_layer_score * head_score) # 12 * 12\n int_score = (mlp_score * int_score) # 12 * 3072\n\n qk_score = None\n if \"qk_head_dim\" in expected_scores:\n qk_head_dim_score = expected_scores[\"qk_head_dim\"] # num_layer * hidden_size\n vo_head_dim_score = expected_scores[\"vo_head_dim\"] # num_layer * hidden_size\n qk_head_dim_score = qk_head_dim_score.view(qk_head_dim_score.shape[0], -1) # 12 * 768\n vo_head_dim_score = vo_head_dim_score.view(vo_head_dim_score.shape[0], -1) # 12 * 768\n head_score = torch.repeat_interleave(head_score, self.base_model_info.dim_per_head, dim=1) # 12 * 768\n\n qk_score = head_score * qk_head_dim_score # 12 * 768\n vo_score = head_score * vo_head_dim_score # 12 * 768\n \n if \"hidden\" in expected_scores:\n hidden_score = expected_scores[\"hidden\"] # 768 \n \n if qk_score is None:\n num_parameters += torch.outer(hidden_score, head_score.reshape(-1)).sum() * self.masks.head.num_params_per_mask / self.base_model_info.hidden_size # 768 * 144\n num_parameters += torch.outer(hidden_score, int_score.reshape(-1)).sum() * self.masks.intermediate.num_params_per_mask / self.base_model_info.hidden_size # 768 * 36864\n else:\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), qk_score.unsqueeze(1))) * 2 # 12 * 768 * 768\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), vo_score.unsqueeze(1))) * 2 # 12 * 768 * 768\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), int_score.unsqueeze(1))) * 3 # 12 * 768 * 3072\n else:\n num_parameters += torch.sum(head_score) * self.masks.head.num_params_per_mask\n num_parameters += torch.sum(int_score) * self.masks.intermediate.num_params_per_mask\n return num_parameters\n \n def get_target_sparsity(self, pruned_steps: int, full_sparsity: float = None):\n target_sparsity = full_sparsity\n if getattr(self, \"lagrangian_warmup_steps\", 0) > 0:\n target_sparsity = (target_sparsity - self.start_sparsity) * min(1, pruned_steps / self.lagrangian_warmup_steps) + self.start_sparsity\n return target_sparsity\n\n\n def lagrangian_regularization(self, pruned_steps: int):\n def _lag_loss(expected_sparsity: torch.tensor, target_sparsity: float, lambda_1: torch.tensor, lambda_2: torch.tensor):\n lagrangian_loss = lambda_1 * (expected_sparsity - target_sparsity) + lambda_2 * (expected_sparsity - target_sparsity) ** 2 \n lagrangian_loss = lagrangian_loss.mean()\n return lagrangian_loss\n\n target_sparsity = self.get_target_sparsity(pruned_steps, self.target_sparsity) \n expected_scores, expected_sparsitys = self.calculate_expected_score_sparsity()\n expected_size = self.get_expected_num_params(expected_scores) #! 
calculate \\bar s\n expected_sparsity = 1 - expected_size / self.prunable_model_size\n \n return_v = {}\n if self.target_model_info is None:\n lagrangian_loss = _lag_loss(expected_sparsity, target_sparsity, self.lambdas[\"lambda_1\"], self.lambdas[\"lambda_2\"])\n return_v = {\"expected_sparsity\": expected_sparsity.item(), \"target_sparsity\": target_sparsity}\n for key in expected_sparsitys:\n return_v[f\"expected_{key}_sparsity\"] = expected_sparsitys[key].mean().item()\n else:\n lagrangian_loss = 0\n return_v = {}\n for pruning_module in self.pruning_modules:\n ts = self.get_target_sparsity(pruned_steps, self.masks[pruning_module].target_sparsity)\n expected_ts = expected_sparsitys[pruning_module] \n lagrangian_loss += _lag_loss(expected_ts, ts, self.lambdas[f\"lambda_1_{pruning_module}\"], self.lambdas[f\"lambda_2_{pruning_module}\"])\n expected_ts = expected_ts.mean().item()\n return_v.update({\"expected_{}_sparsity\".format(pruning_module): expected_ts, \"target_{}_sparsity\".format(pruning_module): ts})\n return_v[\"expected_sparsity\"] = expected_sparsity.item()\n return_v[\"target_sparsity\"] = target_sparsity\n\n\n # return_v might not matter\n return lagrangian_loss, return_v\n \n def forward(self, calculate_lagrangian: bool = False, pruned_steps: int = 0):\n self.constrain_parameters()\n if calculate_lagrangian:\n return self.lagrangian_regularization(pruned_steps)\n \n zs = {f\"{pruning_module}_z\": [] for pruning_module in self.pruning_modules}\n \n if \"layer\" in self.pruning_modules:\n zs.pop(\"layer_z\")\n zs[\"mlp_z\"] = []\n zs[\"head_layer_z\"] = []\n \n if self.training:\n for pruning_module in self.pruning_modules:\n mask = self.masks[pruning_module]\n z = mask.sample_z()\n zs[f\"{pruning_module}_z\"] = z\n else: # removed layerwise! \n with torch.no_grad():\n for pruning_module in self.pruning_modules:\n mask = self.masks[pruning_module]\n z = mask.deterministic_z()\n zs[f\"{pruning_module}_z\"] = z\n if \"layer_z\" in zs:\n zs[\"mlp_z\"] = zs.pop(\"layer_z\")\n zs[\"head_layer_z\"] = zs[\"mlp_z\"]\n return zs " }, { "identifier": "ComposerMosaicLlama", "path": "llmshearing/models/composer_llama.py", "snippet": "class ComposerMosaicLlama(ComposerModel):\n \"\"\" Llama model with the Composer model interface. 
\"\"\"\n def __init__(self, cfg):\n super().__init__()\n self.model = LlamaModel(cfg)\n self.ref_model = None\n self.num_fwd_flops = self._compute_num_fwd_flops()\n self.train_metrics = {\n 'LanguageCrossEntropy': LanguageCrossEntropy(),\n 'Perplexity': LanguagePerplexity(),\n }\n self.eval_metrics = {\n 'LanguageCrossEntropy': LanguageCrossEntropy(),\n 'Perplexity': LanguagePerplexity(),\n }\n\n self.set_names = getattr(cfg, \"set_names\", None)\n if self.set_names is not None:\n self.set_name_to_id = {set_name: i for i, set_name in enumerate(self.set_names)}\n self.set_id_to_name = {i: set_name for i, set_name in enumerate(self.set_names)}\n \n for set_name in self.set_names:\n # add train and eval metrics for each set\n self.train_metrics[f'{set_name}_LanguageCrossEntropy'] = DomainLanguageCrossEntropy(set_name=set_name)\n self.eval_metrics[f'{set_name}_LanguageCrossEntropy'] = DomainLanguageCrossEntropy(set_name=set_name)\n self.train_metrics[f'{set_name}_count'] = DomainCount(set_name=set_name, set_index=self.set_name_to_id[set_name]) \n\n def prune_params(self, zs=None):\n self.model.prune_params(zs)\n \n def get_targets(self, batch):\n targets = torch.roll(batch['labels'], shifts=-1)\n targets[:, -1] = -100\n return targets\n \n def forward(self, batch):\n input_ids = batch['input_ids']\n key_padding_mask = batch['attention_mask'].bool(\n ) if 'attention_mask' in batch else None\n pruned_steps = batch.get('pruned_steps', None)\n if pruned_steps is not None:\n pruned_steps = pruned_steps[0].item()\n zs = {key: batch[key] for key in batch if \"_z\" in key}\n model_output = self.model(input_ids=input_ids, key_padding_mask=key_padding_mask, pruned_steps=pruned_steps, **zs)\n return model_output\n\n def eval_forward(self, batch, outputs=None):\n return outputs if outputs is not None else self.forward(batch)\n\n def loss(self, outputs, batch):\n logits = outputs[\"logits\"]\n l0_output = outputs[\"l0_output\"]\n targets = self.get_targets(batch)\n\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)),\n targets.view(-1),\n ignore_index=-100)\n return_loss = {\"ce_loss\": loss}\n if l0_output is not None:\n lag_loss = l0_output[0]\n return_loss[\"lag_loss\"] = lag_loss\n return_loss[\"total\"] = sum(return_loss.values())\n return return_loss\n\n def get_metrics(self, is_train=False):\n return self.train_metrics if is_train else self.eval_metrics\n\n def update_metric(self, batch, outputs, metric) -> None:\n logits = outputs[\"logits\"]\n if isinstance(metric, DomainLanguageCrossEntropy):\n targets = self.get_targets(batch)\n set_id = self.set_name_to_id[metric.set_name]\n targets[batch[\"set\"] != set_id] = -100\n metric.update(logits, targets)\n elif isinstance(metric, DomainCount):\n with torch.inference_mode():\n idx = None\n selected_sets = batch['set']\n metric.update(selected_sets, idx)\n else:\n logits = logits.view(-1, logits.size(-1))\n targets = self.get_targets(batch).view(-1)\n metric.update(logits, targets)\n\n def add_eval_metrics(self, evaluator):\n evaluator_metrics = {\n m: METRIC_DEFAULT_CTORS[m]() for m in evaluator.metric_names\n }\n if self.eval_metrics is not None:\n self.eval_metrics.update(evaluator_metrics)\n else:\n self.eval_metrics = evaluator_metrics\n\n def _compute_num_fwd_flops(self):\n # Might not be correct for LLaMA structures\n n_params = sum(p.numel() for p in self.parameters())\n # the number of paramters is approximately the number of multiply-accumulates (MAC) in the network\n # each MAC has 2 FLOPs - we multiply by 2 ie 2 * n_param\n # this gets 
us FLOPs / token\n params_flops_per_token = 2 * n_params\n params_flops_per_seq = params_flops_per_token * self.model.cfg.max_seq_len\n # there are 2 FLOPS per mac; there is A=Q*K^T and out=A*V ops (ie mult by 2)\n attn_flops_per_seq = self.model.cfg.n_layers * 2 * 2 * (\n self.model.cfg.d_model * (self.model.cfg.max_seq_len**2))\n return params_flops_per_seq + attn_flops_per_seq\n\n def flops_per_batch(self, batch):\n # Note: this computation does not take into account padding, and assumes\n # that the dataset has been constructed without padding. Additionally, we\n # assume the backward pass is approximately 2x the forward pass\n return self.num_fwd_flops * 3 * batch['input_ids'].shape[0]\n\n def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:\n if new_num_tokens is not None:\n self.model._resize_token_embeddings(new_num_tokens)" }, { "identifier": "prepare_decoder_attention_mask", "path": "llmshearing/models/composer_llama.py", "snippet": "def prepare_decoder_attention_mask(input_shape, inputs_embeds):\n # create causal mask\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = None\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(input_shape, inputs_embeds.dtype).to(inputs_embeds.device)\n\n return combined_attention_mask" }, { "identifier": "turn_head_z", "path": "llmshearing/models/composer_llama.py", "snippet": "def turn_head_z(head_z, head_layer_z):\n head_z = head_z.squeeze().clone()\n if head_layer_z is not None:\n head_z *= head_layer_z\n to_prune_heads = torch.where(head_z == 0)[0].view(-1).tolist()\n return to_prune_heads" }, { "identifier": "turn_mlp_z", "path": "llmshearing/models/composer_llama.py", "snippet": "def turn_mlp_z(intermediate_z, mlp_z):\n intermediate_z_layer = intermediate_z.squeeze().clone()\n if mlp_z is not None:\n intermediate_z_layer *= mlp_z\n keep_intermediate_dims = torch.where(intermediate_z_layer != 0)[0].tolist()\n return keep_intermediate_dims " }, { "identifier": "normal_attn_fn", "path": "llmshearing/models/composer_llama.py", "snippet": "def normal_attn_fn(\n query,\n key, \n value,\n attention_mask=None,\n head_z=None\n):\n bsz, n_heads, q_len, head_dim = query.shape\n dim = n_heads * head_dim\n attn_weights = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(head_dim)\n attn_weights = attn_weights + attention_mask\n attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))\n\n # upcast attention to fp32\n attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)\n attn_output = torch.matmul(attn_weights, value) # (bsz, n_heads, q_len, head_dim)\n if head_z is not None:\n attn_output *= head_z.unsqueeze(-1)\n attn_output = attn_output.transpose(1, 2)\n attn_output = attn_output.reshape(bsz, q_len, dim)\n return attn_output" }, { "identifier": "flash_attn_fn", "path": "llmshearing/models/composer_llama.py", "snippet": "def flash_attn_fn(\n query,\n key,\n value,\n softmax_scale=None,\n attn_bias=None,\n query_padding_mask=None,\n key_padding_mask=None,\n is_causal=False,\n dropout_p=0.0,\n training=False,\n needs_weights=False,\n head_z=None,\n \n):\n try:\n from flash_attn import bert_padding # type: ignore\n from flash_attn import flash_attn_interface # type: ignore\n except ImportError as e:\n raise e\n\n # check_valid_inputs(query, key, value)\n\n if attn_bias is not None:\n raise NotImplementedError(f'attn_bias not implemented for flash attn.')\n\n batch_size, seqlen = 
query.shape[:2]\n\n if query_padding_mask is None:\n query_padding_mask = torch.ones((batch_size, seqlen), dtype=torch.bool, device=query.device)\n if key_padding_mask is None:\n key_padding_mask = torch.ones((batch_size, seqlen), dtype=torch.bool, device=key.device)\n\n query_unpad, indices_q, cu_seqlens_q, max_seqlen_q = bert_padding.unpad_input(\n query, query_padding_mask)\n # query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n key_unpad, _, cu_seqlens_k, max_seqlen_k = bert_padding.unpad_input(\n key, key_padding_mask)\n # key_unpad = rearrange(key_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n value_unpad, _, _, _ = bert_padding.unpad_input(value, key_padding_mask)\n # value_unpad = rearrange(value_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n dropout_p = dropout_p if training else 0.0\n \n output_unpad = flash_attn_interface.flash_attn_unpadded_func(\n query_unpad,\n key_unpad,\n value_unpad,\n cu_seqlens_q,\n cu_seqlens_k,\n max_seqlen_q,\n max_seqlen_k,\n dropout_p,\n softmax_scale=softmax_scale,\n causal=is_causal,\n return_attn_probs=needs_weights)\n\n if head_z is not None:\n output_unpad = output_unpad * head_z # 1 * h * 1\n output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen)\n return output, None" } ]
import math import torch import torch.nn as nn from typing import List, Optional, Tuple from einops import rearrange from omegaconf import DictConfig from torch.nn import functional as F from transformers.pytorch_utils import (find_pruneable_heads_and_indices, prune_linear_layer) from llmshearing.models.l0_module import L0Module from llmshearing.models.composer_llama import ComposerMosaicLlama, prepare_decoder_attention_mask, turn_head_z, turn_mlp_z, normal_attn_fn, flash_attn_fn from transformers.models.gpt_neox.modeling_gpt_neox import apply_rotary_pos_emb
10,948
head_layer_z: Optional[torch.Tensor] = None, intermediate_z: Optional[torch.Tensor] = None, mlp_z: Optional[torch.Tensor] = None, hidden_z: Optional[torch.Tensor] = None, qk_head_dim_z: Optional[torch.Tensor] = None, vo_head_dim_z: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]: if self.ln_1 is not None: a = self.ln_1(x, hidden_z=hidden_z) attn_output, _, past_key_value = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, attention_mask=attention_mask, retain_grad=retain_grad, head_z=head_z, head_layer_z=head_layer_z, hidden_z=hidden_z, qk_head_dim_z=qk_head_dim_z, vo_head_dim_z=vo_head_dim_z) else: attn_output = 0 if self.use_parallel_residual: # pseudocode: # x = x + attn(ln1(x)) + mlp(ln2(x)) if self.ln_2 is not None: b = self.ln_2(x, hidden_z=hidden_z) mlp_output = self.mlp(b, retain_grad, intermediate_z, mlp_z, hidden_z) x = mlp_output + attn_output + x else: x = attn_output + x else: # pseudocode: # x = x + attn(ln1(x)) # x = x + mlp(ln2(x)) if self.ln_2 is not None: attn_output = x + attn_output hidden_states = self.ln_2(attn_output, hidden_z=hidden_z) mlp_output = self.mlp(hidden_states, retain_grad, intermediate_z, mlp_z, hidden_z) x = mlp_output + attn_output else: x = x + attn_output return x, past_key_value class PythiaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() self.attn_impl = cfg.get('attn_impl') self.d_model = cfg.d_model self.n_heads = cfg.n_heads self.all_head_size = cfg.d_model self.head_dim = self.d_model // self.n_heads self.pruned_heads = set() self.softmax_scale = cfg.get('softmax_scale') if self.softmax_scale is None: self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads) self.attn_dropout_p = cfg.get('attn_pdrop') # self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=False) # for param init fn; enables shape based init of fused layers # fuse_splits = (cfg.d_model, 2 * cfg.d_model) # self.Wqkv._fused = (0, fuse_splits) # type: ignore self.query_key_value = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=True) fuse_splits = (cfg.d_model, 2 * cfg.d_model) self.query_key_value._fused = (0, fuse_splits) self.attn_fn = flash_attn_fn if self.attn_impl == 'flash' else normal_attn_fn self.out_proj = nn.Linear(self.d_model, self.d_model, device=device, bias=True) self.out_proj._is_residual = True # type: ignore self.rotary_ndims = int(self.head_dim * cfg.rotary_pct) self.rotary_emb = RotaryEmbedding(self.rotary_ndims, max_position_embeddings=cfg.max_seq_len, device=device) def prune_params(self, zs_block): head_z = None; head_layer_z = None; hidden_z = None; qk_head_dim_z = None; vo_head_dim_z = None if "head_z" in zs_block: head_z = zs_block["head_z"].squeeze() if "head_layer_z" in zs_block: head_layer_z = zs_block["head_layer_z"].squeeze() if "hidden_z" in zs_block: hidden_z = zs_block["hidden_z"].squeeze() # update params # if head_z is not None: head_z_for_update = torch.repeat_interleave(head_z, self.head_dim) start_index = torch.arange(0, self.n_heads * 3, 3) + 2 end_index = start_index + 1 index = torch.cat([torch.arange(i, j) for i, j in zip(start_index * self.head_dim, end_index * self.head_dim)]) self.query_key_value.weight.data[index, :] = \ self.query_key_value.weight.data.transpose(0, 1)[:, index].mul(head_z_for_update).transpose(0, 1) self.query_key_value.bias.data[index] = \ 
self.query_key_value.bias.data[index].mul(head_z_for_update) if head_layer_z is not None: self.out_proj.weight.data = self.out_proj.weight.data.transpose(0, 1).mul(head_layer_z).transpose(0, 1) self.out_proj.bias.data = self.out_proj.bias.data.mul(head_layer_z) if hidden_z is not None: self.out_proj.weight.data = self.out_proj.weight.data.transpose(0, 1).mul(hidden_z).transpose(0, 1) self.out_proj.bias.data = self.out_proj.bias.data.mul(hidden_z) ################# if hidden_z is not None: remaining_index = torch.where(~hidden_z.eq(0))[0] print(f" Head hidden: {len(hidden_z)} -> {len(remaining_index)}") half = next(self.query_key_value.parameters()).dtype == torch.float16 self.query_key_value = prune_linear_layer(self.query_key_value, remaining_index, dim=1) self.out_proj = prune_linear_layer(self.out_proj, remaining_index) if half: self.query_key_value.half() self.out_proj.half()
class ComposerMosaicPythia(ComposerMosaicLlama): def __init__(self, cfg): super().__init__(cfg) self.model = PythiaModel(cfg) class CoFiLayerNorm(torch.nn.LayerNorm): def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None) -> None: super().__init__(normalized_shape, eps, elementwise_affine, device) def forward(self, input, hidden_z=None): if hidden_z is not None: remaining_index = torch.where(~hidden_z.eq(0))[0] compressed_input = torch.index_select( input, dim=-1, index=remaining_index) compressed_weight = self.weight[remaining_index] compressed_bias = self.bias[remaining_index] normalized_shape = len(remaining_index) normed_input = F.layer_norm( compressed_input, [normalized_shape], compressed_weight, compressed_bias, self.eps) output = input.clone() normed_input = normed_input.to(output.dtype) output[..., remaining_index] = normed_input else: output = F.layer_norm( input, self.normalized_shape, self.weight, self.bias, self.eps) return output def prune_params(self, hidden_z): remaining_index = torch.where(~hidden_z.eq(0))[0] # self.weight = torch.nn.Parameter(self.weight.data.mul(hidden_z.squeeze())[remaining_index]) self.weight = torch.nn.parameter.Parameter(self.weight.index_select(0, remaining_index)) self.bias = torch.nn.parameter.Parameter(self.bias.index_select(0, remaining_index)) self.normalized_shape = (len(remaining_index),) class PythiaEmbedding(nn.Embedding): def forward(self, input, hidden_z=None): embeddings = super().forward(input) if hidden_z is not None: embeddings = embeddings.mul(hidden_z) return embeddings def prune_params(self, hidden_z): remaining_index = torch.where(~hidden_z.eq(0))[0] self.weight.data = self.weight.data.mul(hidden_z) self.weight = torch.nn.parameter.Parameter(self.weight.index_select(1, remaining_index).clone()) self.embedding_dim = len(remaining_index) print(f" Embedding: {len(hidden_z)} -> {len(remaining_index)}") class PythiaModel(nn.Module): def __init__(self, cfg: DictConfig): super().__init__() print(f'Tried to build Pythia model with cfg.name={cfg.name}') self.cfg = cfg ### added ### self.l0_module = None if getattr(self.cfg, "l0_module", None) is not None: self.l0_module = L0Module(self.cfg, device=cfg.init_device) ############# layernorm_class = CoFiLayerNorm self.attn_impl = cfg.attn_impl self.embedding_fraction = cfg.get('embedding_fraction', 1) assert 0 < self.embedding_fraction <= 1, 'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!' 
self.transformer = nn.ModuleDict({ "wte": PythiaEmbedding(cfg.vocab_size, cfg.d_model, device=cfg.init_device), }) self.transformer.update({ 'blocks': nn.ModuleList([ PythiaBlock(cfg, device=cfg.init_device) for _ in range(cfg.n_layers) ]) }) self.transformer.update({ "output": nn.Linear(cfg.d_model, cfg.vocab_size, device=cfg.init_device, bias=False), }) self.transformer.update({ "ln_f": layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=cfg.init_device), # TODO: add to config }) self.is_causal = True if cfg.get('verbose') and cfg.get('verbose') > 2: print(self) def prune_params(self, zs=None): # TODO if zs is None: self.l0_module.eval() zs = self.l0_module(calculate_lagrangian=False) # wte as well :) # ln_f if hidden states are to be pruned if "hidden_z" in zs: hidden_z = zs["hidden_z"] remaining_index = torch.where(~hidden_z.eq(0))[0] self.transformer.ln_f.prune_params(hidden_z) self.transformer.wte.weight.data = self.transformer.wte.weight.data.mul(hidden_z) self.transformer.wte.weight = torch.nn.parameter.Parameter( self.transformer.wte.weight.index_select(1, remaining_index).clone()) self.transformer.wte.embedding_dim = len(remaining_index) # self.transformer.output.weight.data = self.transformer.output.weight.data.mul(hidden_z) half = self.transformer.output.weight.data.dtype == torch.float16 self.transformer.output = prune_linear_layer(self.transformer.output, remaining_index, dim=1) if half: self.transformer.output = self.transformer.output.half() for i, block in enumerate(self.transformer.blocks): zs_block = self.get_zs_block(zs, i) block.prune_params(zs_block) def get_zs_block(self, zs, block_idx): zs_block = {} if zs is not None: for key in zs: if key == "hidden_z": zs_block["hidden_z"] = zs["hidden_z"] else: zs_block[key] = zs[key][block_idx] return zs_block def forward( self, input_ids: torch.LongTensor, key_padding_mask: Optional[torch.ByteTensor] = None, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, pruned_steps: int = 0, retain_grad: bool = False, **zs,): S = input_ids.size(1) assert S <= self.cfg.max_seq_len, f"Sequence length ({S}) exceeds model maximum sequence length ({self.cfg.max_seq_len})!" 
tok_emb = self.transformer.wte(input_ids) if "hidden_z" in zs: tok_emb = tok_emb.mul(zs["hidden_z"]) x = tok_emb attn_bias = None # only consider the flash attention case attention_mask = prepare_decoder_attention_mask((tok_emb.size(0), tok_emb.size(1)), tok_emb) l0_output = None if self.l0_module is not None: assert zs == {}, "zs should be empty when using L0Module" zs = self.l0_module(calculate_lagrangian=False, pruned_steps=pruned_steps) for b_idx, block in enumerate(self.transformer.blocks): zs_block = self.get_zs_block(zs, b_idx) past_key_value = past_key_values[ b_idx] if past_key_values is not None else None x, past_key_value = block( x, past_key_value=past_key_value, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=self.is_causal, attention_mask=attention_mask, retain_grad=retain_grad, **zs_block ) if past_key_values is not None: past_key_values[b_idx] = past_key_value x = self.transformer.ln_f(x, hidden_z=zs.get("hidden_z", None)) logits = self.transformer.output(x) if self.l0_module is not None: l0_output = self.l0_module(calculate_lagrangian=True, pruned_steps=pruned_steps) return {"logits": logits, "l0_output": l0_output, "zs": zs} def param_init_fn(self, module): pass def fsdp_wrap_fn(self, module): return isinstance(module, PythiaBlock) # Activation Checkpointing def activation_checkpointing_fn(self, module): return isinstance(module, PythiaBlock) class PythiaBlock(nn.Module): def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() layernorm_class = CoFiLayerNorm # TODO: CoFiLayerNorm,RMSLayerNorm self.ln_1 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device) self.attn = PythiaAttention(cfg, device) self.ln_2 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device) self.mlp = PythiaMLP(cfg, device) self.use_parallel_residual = cfg.get('use_parallel_residual', False) # TODO: add to config def prune_params(self, zs_block): self.attn.prune_params(zs_block) self.mlp.prune_params(zs_block) if self.attn.query_key_value is None: self.ln_1 = None if self.mlp.up_proj is None: self.ln_2 = None if "hidden_z" in zs_block: hidden_z = zs_block["hidden_z"] if self.ln_1 is not None: self.ln_1.prune_params(hidden_z) if self.ln_2 is not None: self.ln_2.prune_params(hidden_z) def forward( self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]] = None, attn_bias: Optional[torch.Tensor] = None, key_padding_mask: Optional[torch.ByteTensor] = None, is_causal: bool = True, attention_mask: Optional[torch.Tensor] = None, retain_grad: bool = False, head_z: Optional[torch.Tensor] = None, head_layer_z: Optional[torch.Tensor] = None, intermediate_z: Optional[torch.Tensor] = None, mlp_z: Optional[torch.Tensor] = None, hidden_z: Optional[torch.Tensor] = None, qk_head_dim_z: Optional[torch.Tensor] = None, vo_head_dim_z: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]: if self.ln_1 is not None: a = self.ln_1(x, hidden_z=hidden_z) attn_output, _, past_key_value = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, attention_mask=attention_mask, retain_grad=retain_grad, head_z=head_z, head_layer_z=head_layer_z, hidden_z=hidden_z, qk_head_dim_z=qk_head_dim_z, vo_head_dim_z=vo_head_dim_z) else: attn_output = 0 if self.use_parallel_residual: # pseudocode: # x = x + attn(ln1(x)) + mlp(ln2(x)) if self.ln_2 is not None: b = self.ln_2(x, hidden_z=hidden_z) mlp_output = self.mlp(b, retain_grad, intermediate_z, mlp_z, 
hidden_z) x = mlp_output + attn_output + x else: x = attn_output + x else: # pseudocode: # x = x + attn(ln1(x)) # x = x + mlp(ln2(x)) if self.ln_2 is not None: attn_output = x + attn_output hidden_states = self.ln_2(attn_output, hidden_z=hidden_z) mlp_output = self.mlp(hidden_states, retain_grad, intermediate_z, mlp_z, hidden_z) x = mlp_output + attn_output else: x = x + attn_output return x, past_key_value class PythiaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() self.attn_impl = cfg.get('attn_impl') self.d_model = cfg.d_model self.n_heads = cfg.n_heads self.all_head_size = cfg.d_model self.head_dim = self.d_model // self.n_heads self.pruned_heads = set() self.softmax_scale = cfg.get('softmax_scale') if self.softmax_scale is None: self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads) self.attn_dropout_p = cfg.get('attn_pdrop') # self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=False) # for param init fn; enables shape based init of fused layers # fuse_splits = (cfg.d_model, 2 * cfg.d_model) # self.Wqkv._fused = (0, fuse_splits) # type: ignore self.query_key_value = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=True) fuse_splits = (cfg.d_model, 2 * cfg.d_model) self.query_key_value._fused = (0, fuse_splits) self.attn_fn = flash_attn_fn if self.attn_impl == 'flash' else normal_attn_fn self.out_proj = nn.Linear(self.d_model, self.d_model, device=device, bias=True) self.out_proj._is_residual = True # type: ignore self.rotary_ndims = int(self.head_dim * cfg.rotary_pct) self.rotary_emb = RotaryEmbedding(self.rotary_ndims, max_position_embeddings=cfg.max_seq_len, device=device) def prune_params(self, zs_block): head_z = None; head_layer_z = None; hidden_z = None; qk_head_dim_z = None; vo_head_dim_z = None if "head_z" in zs_block: head_z = zs_block["head_z"].squeeze() if "head_layer_z" in zs_block: head_layer_z = zs_block["head_layer_z"].squeeze() if "hidden_z" in zs_block: hidden_z = zs_block["hidden_z"].squeeze() # update params # if head_z is not None: head_z_for_update = torch.repeat_interleave(head_z, self.head_dim) start_index = torch.arange(0, self.n_heads * 3, 3) + 2 end_index = start_index + 1 index = torch.cat([torch.arange(i, j) for i, j in zip(start_index * self.head_dim, end_index * self.head_dim)]) self.query_key_value.weight.data[index, :] = \ self.query_key_value.weight.data.transpose(0, 1)[:, index].mul(head_z_for_update).transpose(0, 1) self.query_key_value.bias.data[index] = \ self.query_key_value.bias.data[index].mul(head_z_for_update) if head_layer_z is not None: self.out_proj.weight.data = self.out_proj.weight.data.transpose(0, 1).mul(head_layer_z).transpose(0, 1) self.out_proj.bias.data = self.out_proj.bias.data.mul(head_layer_z) if hidden_z is not None: self.out_proj.weight.data = self.out_proj.weight.data.transpose(0, 1).mul(hidden_z).transpose(0, 1) self.out_proj.bias.data = self.out_proj.bias.data.mul(hidden_z) ################# if hidden_z is not None: remaining_index = torch.where(~hidden_z.eq(0))[0] print(f" Head hidden: {len(hidden_z)} -> {len(remaining_index)}") half = next(self.query_key_value.parameters()).dtype == torch.float16 self.query_key_value = prune_linear_layer(self.query_key_value, remaining_index, dim=1) self.out_proj = prune_linear_layer(self.out_proj, remaining_index) if half: self.query_key_value.half() self.out_proj.half()
to_prune_heads = turn_head_z(head_z, head_layer_z)
3
2023-10-16 12:26:08+00:00
16k
hkchengrex/Cutie
cutie/inference/inference_core.py
[ { "identifier": "MemoryManager", "path": "cutie/inference/memory_manager.py", "snippet": "class MemoryManager:\n \"\"\"\n Manages all three memory stores and the transition between working/long-term memory\n \"\"\"\n def __init__(self, cfg: DictConfig, object_manager: ObjectManager):\n self.object_manager = object_manager\n self.sensory_dim = cfg.model.sensory_dim\n self.top_k = cfg.top_k\n self.chunk_size = cfg.chunk_size\n\n self.save_aux = cfg.save_aux\n\n self.use_long_term = cfg.use_long_term\n self.count_long_term_usage = cfg.long_term.count_usage\n # subtract 1 because the first-frame is now counted as \"permanent memory\"\n # and is not counted towards max_mem_frames\n # but we want to keep the hyperparameters consistent as before for the same behavior\n if self.use_long_term:\n self.max_mem_frames = cfg.long_term.max_mem_frames - 1\n self.min_mem_frames = cfg.long_term.min_mem_frames - 1\n self.num_prototypes = cfg.long_term.num_prototypes\n self.max_long_tokens = cfg.long_term.max_num_tokens\n self.buffer_tokens = cfg.long_term.buffer_tokens\n else:\n self.max_mem_frames = cfg.max_mem_frames - 1\n\n # dimensions will be inferred from input later\n self.CK = self.CV = None\n self.H = self.W = None\n\n # The sensory memory is stored as a dictionary indexed by object ids\n # each of shape bs * C^h * H * W\n self.sensory = {}\n\n # a dictionary indexed by object ids, each of shape bs * T * Q * C\n self.obj_v = {}\n\n self.work_mem = KeyValueMemoryStore(save_selection=self.use_long_term,\n save_usage=self.use_long_term)\n if self.use_long_term:\n self.long_mem = KeyValueMemoryStore(save_usage=self.count_long_term_usage)\n\n self.config_stale = True\n self.engaged = False\n\n def update_config(self, cfg: DictConfig) -> None:\n self.config_stale = True\n self.top_k = cfg['top_k']\n\n assert self.use_long_term == cfg.use_long_term, 'cannot update this'\n assert self.count_long_term_usage == cfg.long_term.count_usage, 'cannot update this'\n\n self.use_long_term = cfg.use_long_term\n self.count_long_term_usage = cfg.long_term.count_usage\n if self.use_long_term:\n self.max_mem_frames = cfg.long_term.max_mem_frames - 1\n self.min_mem_frames = cfg.long_term.min_mem_frames - 1\n self.num_prototypes = cfg.long_term.num_prototypes\n self.max_long_tokens = cfg.long_term.max_num_tokens\n self.buffer_tokens = cfg.long_term.buffer_tokens\n else:\n self.max_mem_frames = cfg.max_mem_frames - 1\n\n def _readout(self, affinity, v) -> torch.Tensor:\n # affinity: bs*N*HW\n # v: bs*C*N or bs*num_objects*C*N\n # returns bs*C*HW or bs*num_objects*C*HW\n if len(v.shape) == 3:\n # single object\n return v @ affinity\n else:\n bs, num_objects, C, N = v.shape\n v = v.view(bs, num_objects * C, N)\n out = v @ affinity\n return out.view(bs, num_objects, C, -1)\n\n def _get_mask_by_ids(self, mask: torch.Tensor, obj_ids: List[int]) -> torch.Tensor:\n # -1 because the mask does not contain the background channel\n return mask[:, [self.object_manager.find_tmp_by_id(obj) - 1 for obj in obj_ids]]\n\n def _get_sensory_by_ids(self, obj_ids: List[int]) -> torch.Tensor:\n return torch.stack([self.sensory[obj] for obj in obj_ids], dim=1)\n\n def _get_object_mem_by_ids(self, obj_ids: List[int]) -> torch.Tensor:\n return torch.stack([self.obj_v[obj] for obj in obj_ids], dim=1)\n\n def _get_visual_values_by_ids(self, obj_ids: List[int]) -> torch.Tensor:\n # All the values that the object ids refer to should have the same shape\n value = torch.stack([self.work_mem.value[obj] for obj in obj_ids], dim=1)\n if self.use_long_term 
and obj_ids[0] in self.long_mem.value:\n lt_value = torch.stack([self.long_mem.value[obj] for obj in obj_ids], dim=1)\n value = torch.cat([lt_value, value], dim=-1)\n\n return value\n\n def read(self, pix_feat: torch.Tensor, query_key: torch.Tensor, selection: torch.Tensor,\n last_mask: torch.Tensor, network: CUTIE) -> Dict[int, torch.Tensor]:\n \"\"\"\n Read from all memory stores and returns a single memory readout tensor for each object\n\n pix_feat: (1/2) x C x H x W\n query_key: (1/2) x C^k x H x W\n selection: (1/2) x C^k x H x W\n last_mask: (1/2) x num_objects x H x W (at stride 16)\n return a dict of memory readouts, indexed by object indices. Each readout is C*H*W\n \"\"\"\n h, w = pix_feat.shape[-2:]\n bs = pix_feat.shape[0]\n assert query_key.shape[0] == bs\n assert selection.shape[0] == bs\n assert last_mask.shape[0] == bs\n\n query_key = query_key.flatten(start_dim=2) # bs*C^k*HW\n selection = selection.flatten(start_dim=2) # bs*C^k*HW\n \"\"\"\n Compute affinity and perform readout\n \"\"\"\n all_readout_mem = {}\n buckets = self.work_mem.buckets\n for bucket_id, bucket in buckets.items():\n if self.use_long_term and self.long_mem.engaged(bucket_id):\n # Use long-term memory\n long_mem_size = self.long_mem.size(bucket_id)\n memory_key = torch.cat([self.long_mem.key[bucket_id], self.work_mem.key[bucket_id]],\n -1)\n shrinkage = torch.cat(\n [self.long_mem.shrinkage[bucket_id], self.work_mem.shrinkage[bucket_id]], -1)\n\n similarity = get_similarity(memory_key, shrinkage, query_key, selection)\n affinity, usage = do_softmax(similarity,\n top_k=self.top_k,\n inplace=True,\n return_usage=True)\n \"\"\"\n Record memory usage for working and long-term memory\n \"\"\"\n # ignore the index return for long-term memory\n work_usage = usage[:, long_mem_size:]\n self.work_mem.update_bucket_usage(bucket_id, work_usage)\n\n if self.count_long_term_usage:\n # ignore the index return for working memory\n long_usage = usage[:, :long_mem_size]\n self.long_mem.update_bucket_usage(bucket_id, long_usage)\n else:\n # no long-term memory\n memory_key = self.work_mem.key[bucket_id]\n shrinkage = self.work_mem.shrinkage[bucket_id]\n similarity = get_similarity(memory_key, shrinkage, query_key, selection)\n\n if self.use_long_term:\n affinity, usage = do_softmax(similarity,\n top_k=self.top_k,\n inplace=True,\n return_usage=True)\n self.work_mem.update_bucket_usage(bucket_id, usage)\n else:\n affinity = do_softmax(similarity, top_k=self.top_k, inplace=True)\n\n if self.chunk_size < 1:\n object_chunks = [bucket]\n else:\n object_chunks = [\n bucket[i:i + self.chunk_size] for i in range(0, len(bucket), self.chunk_size)\n ]\n\n for objects in object_chunks:\n this_sensory = self._get_sensory_by_ids(objects)\n this_last_mask = self._get_mask_by_ids(last_mask, objects)\n this_msk_value = self._get_visual_values_by_ids(objects) # (1/2)*num_objects*C*N\n visual_readout = self._readout(affinity,\n this_msk_value).view(bs, len(objects), self.CV, h, w)\n pixel_readout = network.pixel_fusion(pix_feat, visual_readout, this_sensory,\n this_last_mask)\n this_obj_mem = self._get_object_mem_by_ids(objects).unsqueeze(2)\n readout_memory, aux_features = network.readout_query(pixel_readout, this_obj_mem)\n for i, obj in enumerate(objects):\n all_readout_mem[obj] = readout_memory[:, i]\n\n if self.save_aux:\n aux_output = {\n 'sensory': this_sensory,\n 'pixel_readout': pixel_readout,\n 'q_logits': aux_features['logits'] if aux_features else None,\n 'q_weights': aux_features['q_weights'] if aux_features else None,\n 
'p_weights': aux_features['p_weights'] if aux_features else None,\n 'attn_mask': aux_features['attn_mask'].float() if aux_features else None,\n }\n self.aux = aux_output\n\n return all_readout_mem\n\n def add_memory(self,\n key: torch.Tensor,\n shrinkage: torch.Tensor,\n msk_value: torch.Tensor,\n obj_value: torch.Tensor,\n objects: List[int],\n selection: torch.Tensor = None,\n *,\n as_permanent: bool = False) -> None:\n # key: (1/2)*C*H*W\n # msk_value: (1/2)*num_objects*C*H*W\n # obj_value: (1/2)*num_objects*Q*C\n # objects contains a list of object ids corresponding to the objects in msk_value/obj_value\n bs = key.shape[0]\n assert shrinkage.shape[0] == bs\n assert msk_value.shape[0] == bs\n assert obj_value.shape[0] == bs\n\n self.engaged = True\n if self.H is None or self.config_stale:\n self.config_stale = False\n self.H, self.W = msk_value.shape[-2:]\n self.HW = self.H * self.W\n # convert from num. frames to num. tokens\n self.max_work_tokens = self.max_mem_frames * self.HW\n if self.use_long_term:\n self.min_work_tokens = self.min_mem_frames * self.HW\n\n # key: bs*C*N\n # value: bs*num_objects*C*N\n key = key.flatten(start_dim=2)\n shrinkage = shrinkage.flatten(start_dim=2)\n self.CK = key.shape[1]\n\n msk_value = msk_value.flatten(start_dim=3)\n self.CV = msk_value.shape[2]\n\n if selection is not None:\n # not used in non-long-term mode\n selection = selection.flatten(start_dim=2)\n\n # insert object values into object memory\n for obj_id, obj in enumerate(objects):\n if obj in self.obj_v:\n \"\"\"streaming average\n each self.obj_v[obj] is (1/2)*num_summaries*(embed_dim+1)\n first embed_dim keeps track of the sum of embeddings\n the last dim keeps the total count\n averaging in done inside the object transformer\n\n incoming obj_value is (1/2)*num_objects*num_summaries*(embed_dim+1)\n self.obj_v[obj] = torch.cat([self.obj_v[obj], obj_value[:, obj_id]], dim=0)\n \"\"\"\n last_acc = self.obj_v[obj][:, :, -1]\n new_acc = last_acc + obj_value[:, obj_id, :, -1]\n\n self.obj_v[obj][:, :, :-1] = (self.obj_v[obj][:, :, :-1] +\n obj_value[:, obj_id, :, :-1])\n self.obj_v[obj][:, :, -1] = new_acc\n else:\n self.obj_v[obj] = obj_value[:, obj_id]\n\n # convert mask value tensor into a dict for insertion\n msk_values = {obj: msk_value[:, obj_id] for obj_id, obj in enumerate(objects)}\n self.work_mem.add(key,\n msk_values,\n shrinkage,\n selection=selection,\n as_permanent=as_permanent)\n\n for bucket_id in self.work_mem.buckets.keys():\n # long-term memory cleanup\n if self.use_long_term:\n # Do memory compressed if needed\n if self.work_mem.non_perm_size(bucket_id) >= self.max_work_tokens:\n # Remove obsolete features if needed\n if self.long_mem.non_perm_size(bucket_id) >= (self.max_long_tokens -\n self.num_prototypes):\n self.long_mem.remove_obsolete_features(\n bucket_id,\n self.max_long_tokens - self.num_prototypes - self.buffer_tokens)\n\n self.compress_features(bucket_id)\n else:\n # FIFO\n self.work_mem.remove_old_memory(bucket_id, self.max_work_tokens)\n\n def purge_except(self, obj_keep_idx: List[int]) -> None:\n # purge certain objects from the memory except the one listed\n self.work_mem.purge_except(obj_keep_idx)\n if self.use_long_term and self.long_mem.engaged():\n self.long_mem.purge_except(obj_keep_idx)\n self.sensory = {k: v for k, v in self.sensory.items() if k in obj_keep_idx}\n\n if not self.work_mem.engaged():\n # everything is removed!\n self.engaged = False\n\n def compress_features(self, bucket_id: int) -> None:\n HW = self.HW\n\n # perform memory consolidation\n 
prototype_key, prototype_value, prototype_shrinkage = self.consolidation(\n *self.work_mem.get_all_sliced(bucket_id, 0, -self.min_work_tokens))\n\n # remove consolidated working memory\n self.work_mem.sieve_by_range(bucket_id,\n 0,\n -self.min_work_tokens,\n min_size=self.min_work_tokens)\n\n # add to long-term memory\n self.long_mem.add(prototype_key,\n prototype_value,\n prototype_shrinkage,\n selection=None,\n supposed_bucket_id=bucket_id)\n\n def consolidation(self, candidate_key: torch.Tensor, candidate_shrinkage: torch.Tensor,\n candidate_selection: torch.Tensor, candidate_value: Dict[int, torch.Tensor],\n usage: torch.Tensor) -> (torch.Tensor, Dict[int, torch.Tensor], torch.Tensor):\n # find the indices with max usage\n bs = candidate_key.shape[0]\n assert bs in [1, 2]\n\n prototype_key = []\n prototype_selection = []\n for bi in range(bs):\n _, max_usage_indices = torch.topk(usage[bi], k=self.num_prototypes, dim=-1, sorted=True)\n prototype_indices = max_usage_indices.flatten()\n prototype_key.append(candidate_key[bi, :, prototype_indices])\n prototype_selection.append(candidate_selection[bi, :, prototype_indices])\n prototype_key = torch.stack(prototype_key, dim=0)\n prototype_selection = torch.stack(prototype_selection, dim=0)\n \"\"\"\n Potentiation step\n \"\"\"\n similarity = get_similarity(candidate_key, candidate_shrinkage, prototype_key,\n prototype_selection)\n affinity = do_softmax(similarity)\n\n # readout the values\n prototype_value = {k: self._readout(affinity, v) for k, v in candidate_value.items()}\n\n # readout the shrinkage term\n prototype_shrinkage = self._readout(affinity, candidate_shrinkage)\n\n return prototype_key, prototype_value, prototype_shrinkage\n\n def initialize_sensory_if_needed(self, sample_key: torch.Tensor, ids: List[int]):\n for obj in ids:\n if obj not in self.sensory:\n # also initializes the sensory memory\n bs, _, h, w = sample_key.shape\n self.sensory[obj] = torch.zeros((bs, self.sensory_dim, h, w),\n device=sample_key.device)\n\n def update_sensory(self, sensory: torch.Tensor, ids: List[int]):\n # sensory: 1*num_objects*C*H*W\n for obj_id, obj in enumerate(ids):\n self.sensory[obj] = sensory[:, obj_id]\n\n def get_sensory(self, ids: List[int]):\n # returns (1/2)*num_objects*C*H*W\n return self._get_sensory_by_ids(ids)\n \n def clear_non_permanent_memory(self):\n self.work_mem.clear_non_permanent_memory()\n if self.use_long_term:\n self.long_mem.clear_non_permanent_memory()\n\n def clear_sensory_memory(self):\n self.sensory = {}" }, { "identifier": "ObjectManager", "path": "cutie/inference/object_manager.py", "snippet": "class ObjectManager:\n \"\"\"\n Object IDs are immutable. The same ID always represent the same object.\n Temporary IDs are the positions of each object in the tensor. 
It changes as objects get removed.\n Temporary IDs start from 1.\n \"\"\"\n def __init__(self):\n self.obj_to_tmp_id: Dict[ObjectInfo, int] = {}\n self.tmp_id_to_obj: Dict[int, ObjectInfo] = {}\n self.obj_id_to_obj: Dict[int, ObjectInfo] = {}\n\n self.all_historical_object_ids: List[int] = []\n\n def _recompute_obj_id_to_obj_mapping(self) -> None:\n self.obj_id_to_obj = {obj.id: obj for obj in self.obj_to_tmp_id}\n\n def add_new_objects(\n self, objects: Union[List[ObjectInfo], ObjectInfo,\n List[int]]) -> (List[int], List[int]):\n if not isinstance(objects, list):\n objects = [objects]\n\n corresponding_tmp_ids = []\n corresponding_obj_ids = []\n for obj in objects:\n if isinstance(obj, int):\n obj = ObjectInfo(id=obj)\n\n if obj in self.obj_to_tmp_id:\n # old object\n corresponding_tmp_ids.append(self.obj_to_tmp_id[obj])\n corresponding_obj_ids.append(obj.id)\n else:\n # new object\n new_obj = ObjectInfo(id=obj.id)\n\n # new object\n new_tmp_id = len(self.obj_to_tmp_id) + 1\n self.obj_to_tmp_id[new_obj] = new_tmp_id\n self.tmp_id_to_obj[new_tmp_id] = new_obj\n self.all_historical_object_ids.append(new_obj.id)\n corresponding_tmp_ids.append(new_tmp_id)\n corresponding_obj_ids.append(new_obj.id)\n\n self._recompute_obj_id_to_obj_mapping()\n assert corresponding_tmp_ids == sorted(corresponding_tmp_ids)\n return corresponding_tmp_ids, corresponding_obj_ids\n\n def delete_object(self, obj_ids_to_remove: Union[int, List[int]]) -> None:\n # delete an object or a list of objects\n # re-sort the tmp ids\n if isinstance(obj_ids_to_remove, int):\n obj_ids_to_remove = [obj_ids_to_remove]\n\n new_tmp_id = 1\n total_num_id = len(self.obj_to_tmp_id)\n\n local_obj_to_tmp_id = {}\n local_tmp_to_obj_id = {}\n\n for tmp_iter in range(1, total_num_id + 1):\n obj = self.tmp_id_to_obj[tmp_iter]\n if obj.id not in obj_ids_to_remove:\n local_obj_to_tmp_id[obj] = new_tmp_id\n local_tmp_to_obj_id[new_tmp_id] = obj\n new_tmp_id += 1\n\n self.obj_to_tmp_id = local_obj_to_tmp_id\n self.tmp_id_to_obj = local_tmp_to_obj_id\n self._recompute_obj_id_to_obj_mapping()\n\n def purge_inactive_objects(self,\n max_missed_detection_count: int) -> (bool, List[int], List[int]):\n # remove tmp ids of objects that are removed\n obj_id_to_be_deleted = []\n tmp_id_to_be_deleted = []\n tmp_id_to_keep = []\n obj_id_to_keep = []\n\n for obj in self.obj_to_tmp_id:\n if obj.poke_count > max_missed_detection_count:\n obj_id_to_be_deleted.append(obj.id)\n tmp_id_to_be_deleted.append(self.obj_to_tmp_id[obj])\n else:\n tmp_id_to_keep.append(self.obj_to_tmp_id[obj])\n obj_id_to_keep.append(obj.id)\n\n purge_activated = len(obj_id_to_be_deleted) > 0\n if purge_activated:\n self.delete_object(obj_id_to_be_deleted)\n return purge_activated, tmp_id_to_keep, obj_id_to_keep\n\n def tmp_to_obj_cls(self, mask) -> torch.Tensor:\n # remap tmp id cls representation to the true object id representation\n new_mask = torch.zeros_like(mask)\n for tmp_id, obj in self.tmp_id_to_obj.items():\n new_mask[mask == tmp_id] = obj.id\n return new_mask\n\n def get_tmp_to_obj_mapping(self) -> Dict[int, ObjectInfo]:\n # returns the mapping in a dict format for saving it with pickle\n return {obj.id: tmp_id for obj, tmp_id in self.tmp_id_to_obj.items()}\n\n def realize_dict(self, obj_dict, dim=1) -> torch.Tensor:\n # turns a dict indexed by obj id into a tensor, ordered by tmp IDs\n output = []\n for _, obj in self.tmp_id_to_obj.items():\n if obj.id not in obj_dict:\n raise NotImplementedError\n output.append(obj_dict[obj.id])\n output = torch.stack(output, dim=dim)\n 
return output\n\n def make_one_hot(self, cls_mask) -> torch.Tensor:\n output = []\n for _, obj in self.tmp_id_to_obj.items():\n output.append(cls_mask == obj.id)\n if len(output) == 0:\n output = torch.zeros((0, *cls_mask.shape), dtype=torch.bool, device=cls_mask.device)\n else:\n output = torch.stack(output, dim=0)\n return output\n\n @property\n def all_obj_ids(self) -> List[int]:\n return [k.id for k in self.obj_to_tmp_id]\n\n @property\n def num_obj(self) -> int:\n return len(self.obj_to_tmp_id)\n\n def has_all(self, objects: List[int]) -> bool:\n for obj in objects:\n if obj not in self.obj_to_tmp_id:\n return False\n return True\n\n def find_object_by_id(self, obj_id) -> ObjectInfo:\n return self.obj_id_to_obj[obj_id]\n\n def find_tmp_by_id(self, obj_id) -> int:\n return self.obj_to_tmp_id[self.obj_id_to_obj[obj_id]]" }, { "identifier": "ImageFeatureStore", "path": "cutie/inference/image_feature_store.py", "snippet": "class ImageFeatureStore:\n \"\"\"\n A cache for image features.\n These features might be reused at different parts of the inference pipeline.\n This class provide an interface for reusing these features.\n It is the user's responsibility to delete redundant features.\n\n Feature of a frame should be associated with a unique index -- typically the frame id.\n \"\"\"\n def __init__(self, network: CUTIE, no_warning: bool = False):\n self.network = network\n self._store = {}\n self.no_warning = no_warning\n\n def _encode_feature(self, index: int, image: torch.Tensor) -> None:\n ms_features, pix_feat = self.network.encode_image(image)\n key, shrinkage, selection = self.network.transform_key(ms_features[0])\n self._store[index] = (ms_features, pix_feat, key, shrinkage, selection)\n\n def get_features(self, index: int,\n image: torch.Tensor) -> (Iterable[torch.Tensor], torch.Tensor):\n if index not in self._store:\n self._encode_feature(index, image)\n\n return self._store[index][:2]\n\n def get_key(self, index: int,\n image: torch.Tensor) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n if index not in self._store:\n self._encode_feature(index, image)\n\n return self._store[index][2:]\n\n def delete(self, index: int) -> None:\n if index in self._store:\n del self._store[index]\n\n def __len__(self):\n return len(self._store)\n\n def __del__(self):\n if len(self._store) > 0 and not self.no_warning:\n warnings.warn(f'Leaking {self._store.keys()} in the image feature store')" }, { "identifier": "CUTIE", "path": "cutie/model/cutie.py", "snippet": "class CUTIE(nn.Module):\n def __init__(self, cfg: DictConfig, *, single_object=False):\n super().__init__()\n model_cfg = cfg.model\n self.ms_dims = model_cfg.pixel_encoder.ms_dims\n self.key_dim = model_cfg.key_dim\n self.value_dim = model_cfg.value_dim\n self.sensory_dim = model_cfg.sensory_dim\n self.pixel_dim = model_cfg.pixel_dim\n self.embed_dim = model_cfg.embed_dim\n self.single_object = single_object\n\n log.info(f'Single object: {self.single_object}')\n\n self.pixel_encoder = PixelEncoder(model_cfg)\n self.pix_feat_proj = nn.Conv2d(self.ms_dims[0], self.pixel_dim, kernel_size=1)\n self.key_proj = KeyProjection(model_cfg)\n self.mask_encoder = MaskEncoder(model_cfg, single_object=single_object)\n self.mask_decoder = MaskDecoder(model_cfg)\n self.pixel_fuser = PixelFeatureFuser(model_cfg, single_object=single_object)\n self.object_transformer = QueryTransformer(model_cfg)\n self.object_summarizer = ObjectSummarizer(model_cfg)\n self.aux_computer = AuxComputer(cfg)\n\n self.register_buffer(\"pixel_mean\", 
torch.Tensor(model_cfg.pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(model_cfg.pixel_std).view(-1, 1, 1), False)\n\n def _get_others(self, masks: torch.Tensor) -> torch.Tensor:\n # for each object, return the sum of masks of all other objects\n if self.single_object:\n return None\n\n num_objects = masks.shape[1]\n if num_objects >= 1:\n others = (masks.sum(dim=1, keepdim=True) - masks).clamp(0, 1)\n else:\n others = torch.zeros_like(masks)\n return others\n\n def encode_image(self, image: torch.Tensor) -> (Iterable[torch.Tensor], torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n ms_image_feat = self.pixel_encoder(image)\n return ms_image_feat, self.pix_feat_proj(ms_image_feat[0])\n\n def encode_mask(\n self,\n image: torch.Tensor,\n ms_features: List[torch.Tensor],\n sensory: torch.Tensor,\n masks: torch.Tensor,\n *,\n deep_update: bool = True,\n chunk_size: int = -1,\n need_weights: bool = False) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n others = self._get_others(masks)\n mask_value, new_sensory = self.mask_encoder(image,\n ms_features,\n sensory,\n masks,\n others,\n deep_update=deep_update,\n chunk_size=chunk_size)\n object_summaries, object_logits = self.object_summarizer(masks, mask_value, need_weights)\n return mask_value, new_sensory, object_summaries, object_logits\n\n def transform_key(self,\n final_pix_feat: torch.Tensor,\n *,\n need_sk: bool = True,\n need_ek: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n key, shrinkage, selection = self.key_proj(final_pix_feat, need_s=need_sk, need_e=need_ek)\n return key, shrinkage, selection\n\n # Used in training only.\n # This step is replaced by MemoryManager in test time\n def read_memory(self, query_key: torch.Tensor, query_selection: torch.Tensor,\n memory_key: torch.Tensor, memory_shrinkage: torch.Tensor,\n msk_value: torch.Tensor, obj_memory: torch.Tensor, pix_feat: torch.Tensor,\n sensory: torch.Tensor, last_mask: torch.Tensor,\n selector: torch.Tensor) -> (torch.Tensor, Dict[str, torch.Tensor]):\n \"\"\"\n query_key : B * CK * H * W\n query_selection : B * CK * H * W\n memory_key : B * CK * T * H * W\n memory_shrinkage: B * 1 * T * H * W\n msk_value : B * num_objects * CV * T * H * W\n obj_memory : B * num_objects * T * num_summaries * C\n pixel_feature : B * C * H * W\n \"\"\"\n batch_size, num_objects = msk_value.shape[:2]\n\n # read using visual attention\n with torch.cuda.amp.autocast(enabled=False):\n affinity = get_affinity(memory_key.float(), memory_shrinkage.float(), query_key.float(),\n query_selection.float())\n\n msk_value = msk_value.flatten(start_dim=1, end_dim=2).float()\n\n # B * (num_objects*CV) * H * W\n pixel_readout = readout(affinity, msk_value)\n pixel_readout = pixel_readout.view(batch_size, num_objects, self.value_dim,\n *pixel_readout.shape[-2:])\n pixel_readout = self.pixel_fusion(pix_feat, pixel_readout, sensory, last_mask)\n\n # read from query transformer\n mem_readout, aux_features = self.readout_query(pixel_readout, obj_memory, selector=selector)\n\n aux_output = {\n 'sensory': sensory,\n 'q_logits': aux_features['logits'] if aux_features else None,\n 'attn_mask': aux_features['attn_mask'] if aux_features else None,\n }\n\n return mem_readout, aux_output\n\n def pixel_fusion(self,\n pix_feat: torch.Tensor,\n pixel: torch.Tensor,\n sensory: torch.Tensor,\n last_mask: torch.Tensor,\n *,\n chunk_size: int = -1) -> torch.Tensor:\n last_mask = 
F.interpolate(last_mask, size=sensory.shape[-2:], mode='area')\n last_others = self._get_others(last_mask)\n fused = self.pixel_fuser(pix_feat,\n pixel,\n sensory,\n last_mask,\n last_others,\n chunk_size=chunk_size)\n return fused\n\n def readout_query(self,\n pixel_readout,\n obj_memory,\n *,\n selector=None,\n need_weights=False) -> (torch.Tensor, Dict[str, torch.Tensor]):\n return self.object_transformer(pixel_readout,\n obj_memory,\n selector=selector,\n need_weights=need_weights)\n\n def segment(self,\n ms_image_feat: List[torch.Tensor],\n memory_readout: torch.Tensor,\n sensory: torch.Tensor,\n *,\n selector: bool = None,\n chunk_size: int = -1,\n update_sensory: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n \"\"\"\n multi_scale_features is from the key encoder for skip-connection\n memory_readout is from working/long-term memory\n sensory is the sensory memory\n last_mask is the mask from the last frame, supplementing sensory memory\n selector is 1 if an object exists, and 0 otherwise. We use it to filter padded objects\n during training.\n \"\"\"\n sensory, logits = self.mask_decoder(ms_image_feat,\n memory_readout,\n sensory,\n chunk_size=chunk_size,\n update_sensory=update_sensory)\n\n prob = torch.sigmoid(logits)\n if selector is not None:\n prob = prob * selector\n\n # Softmax over all objects[]\n logits = aggregate(prob, dim=1)\n logits = F.interpolate(logits, scale_factor=4, mode='bilinear', align_corners=False)\n prob = F.softmax(logits, dim=1)\n\n return sensory, logits, prob\n\n def compute_aux(self, pix_feat: torch.Tensor, aux_inputs: Dict[str, torch.Tensor],\n selector: torch.Tensor) -> Dict[str, torch.Tensor]:\n return self.aux_computer(pix_feat, aux_inputs, selector)\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n def load_weights(self, src_dict, init_as_zero_if_needed=False) -> None:\n if not self.single_object:\n # Map single-object weight to multi-object weight (4->5 out channels in conv1)\n for k in list(src_dict.keys()):\n if k == 'mask_encoder.conv1.weight':\n if src_dict[k].shape[1] == 4:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((64, 1, 7, 7), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif k == 'pixel_fuser.sensory_compress.weight':\n if src_dict[k].shape[1] == self.sensory_dim + 1:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((self.value_dim, 1, 1, 1), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif self.single_object:\n \"\"\"\n If the model is multiple-object and we are training in single-object, \n we strip the last channel of conv1.\n This is not supposed to happen in standard training except when users are trying to\n finetune a trained model with single object datasets.\n \"\"\"\n if src_dict['mask_encoder.conv1.weight'].shape[1] == 5:\n log.warning(f'Converting {k} from multiple objects to single object.'\n 'This is not supposed to happen in standard training.')\n src_dict[k] = src_dict[k][:, :-1]\n\n for k in src_dict:\n if k not in self.state_dict():\n log.info(f'Key {k} found in src_dict but not in 
self.state_dict()!!!')\n for k in self.state_dict():\n if k not in src_dict:\n log.info(f'Key {k} found in self.state_dict() but not in src_dict!!!')\n\n self.load_state_dict(src_dict, strict=False)\n\n @property\n def device(self) -> torch.device:\n return self.pixel_mean.device" }, { "identifier": "pad_divide_by", "path": "cutie/utils/tensor_utils.py", "snippet": "def pad_divide_by(in_img: torch.Tensor, d: int) -> (torch.Tensor, Iterable[int]):\n h, w = in_img.shape[-2:]\n\n if h % d > 0:\n new_h = h + d - h % d\n else:\n new_h = h\n if w % d > 0:\n new_w = w + d - w % d\n else:\n new_w = w\n lh, uh = int((new_h - h) / 2), int(new_h - h) - int((new_h - h) / 2)\n lw, uw = int((new_w - w) / 2), int(new_w - w) - int((new_w - w) / 2)\n pad_array = (int(lw), int(uw), int(lh), int(uh))\n out = F.pad(in_img, pad_array)\n return out, pad_array" }, { "identifier": "unpad", "path": "cutie/utils/tensor_utils.py", "snippet": "def unpad(img: torch.Tensor, pad: Iterable[int]) -> torch.Tensor:\n if len(img.shape) == 4:\n if pad[2] + pad[3] > 0:\n img = img[:, :, pad[2]:-pad[3], :]\n if pad[0] + pad[1] > 0:\n img = img[:, :, :, pad[0]:-pad[1]]\n elif len(img.shape) == 3:\n if pad[2] + pad[3] > 0:\n img = img[:, pad[2]:-pad[3], :]\n if pad[0] + pad[1] > 0:\n img = img[:, :, pad[0]:-pad[1]]\n elif len(img.shape) == 5:\n if pad[2] + pad[3] > 0:\n img = img[:, :, :, pad[2]:-pad[3], :]\n if pad[0] + pad[1] > 0:\n img = img[:, :, :, :, pad[0]:-pad[1]]\n else:\n raise NotImplementedError\n return img" }, { "identifier": "aggregate", "path": "cutie/utils/tensor_utils.py", "snippet": "def aggregate(prob: torch.Tensor, dim: int) -> torch.Tensor:\n with torch.cuda.amp.autocast(enabled=False):\n prob = prob.float()\n new_prob = torch.cat([torch.prod(1 - prob, dim=dim, keepdim=True), prob],\n dim).clamp(1e-7, 1 - 1e-7)\n logits = torch.log((new_prob / (1 - new_prob)))\n\n return logits" } ]
from typing import List, Optional, Iterable, Dict from omegaconf import DictConfig from cutie.inference.memory_manager import MemoryManager from cutie.inference.object_manager import ObjectManager from cutie.inference.image_feature_store import ImageFeatureStore from cutie.model.cutie import CUTIE from cutie.utils.tensor_utils import pad_divide_by, unpad, aggregate import logging import numpy as np import torch import torch.nn.functional as F
11064
shrinkage, msk_value, obj_value, self.object_manager.all_obj_ids, selection=selection, as_permanent=as_permanent) self.last_mem_ti = self.curr_ti if is_deep_update: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) def _segment(self, key: torch.Tensor, selection: torch.Tensor, pix_feat: torch.Tensor, ms_features: Iterable[torch.Tensor], update_sensory: bool = True) -> torch.Tensor: """ Produce a segmentation using the given features and the memory The batch dimension is 1 if flip augmentation is not used. key/selection: for anisotropic l2: (1/2) * _ * H * W pix_feat: from the key encoder, (1/2) * _ * H * W ms_features: an iterable of multiscale features from the encoder, each is (1/2)*_*H*W with strides 16, 8, and 4 respectively update_sensory: whether to update the sensory memory Returns: (num_objects+1)*H*W normalized probability; the first channel is the background """ bs = key.shape[0] if self.flip_aug: assert bs == 2 else: assert bs == 1 if not self.memory.engaged: log.warn('Trying to segment without any memory!') return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16), device=key.device, dtype=key.dtype) memory_readout = self.memory.read(pix_feat, key, selection, self.last_mask, self.network) memory_readout = self.object_manager.realize_dict(memory_readout) sensory, _, pred_prob_with_bg = self.network.segment(ms_features, memory_readout, self.memory.get_sensory( self.object_manager.all_obj_ids), chunk_size=self.chunk_size, update_sensory=update_sensory) # remove batch dim if self.flip_aug: # average predictions of the non-flipped and flipped version pred_prob_with_bg = (pred_prob_with_bg[0] + torch.flip(pred_prob_with_bg[1], dims=[-1])) / 2 else: pred_prob_with_bg = pred_prob_with_bg[0] if update_sensory: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) return pred_prob_with_bg def step(self, image: torch.Tensor, mask: Optional[torch.Tensor] = None, objects: Optional[List[int]] = None, *, idx_mask: bool = True, end: bool = False, delete_buffer: bool = True, force_permanent: bool = False) -> torch.Tensor: """ Take a step with a new incoming image. If there is an incoming mask with new objects, we will memorize them. If there is no incoming mask, we will segment the image using the memory. In both cases, we will update the memory and return a segmentation. image: 3*H*W mask: H*W (if idx mask) or len(objects)*H*W or None objects: list of object ids that are valid in the mask Tensor. The ids themselves do not need to be consecutive/in order, but they need to be in the same position in the list as the corresponding mask in the tensor in non-idx-mask mode. objects is ignored if the mask is None. If idx_mask is False and objects is None, we sequentially infer the object ids. idx_mask: if True, mask is expected to contain an object id at every pixel. If False, mask should have multiple channels with each channel representing one object. 
end: if we are at the end of the sequence, we do not need to update memory if unsure just set it to False delete_buffer: whether to delete the image feature buffer after this step force_permanent: the memory recorded this frame will be added to the permanent memory """ if objects is None and mask is not None: assert not idx_mask objects = list(range(1, mask.shape[0] + 1)) # resize input if needed -- currently only used for the GUI resize_needed = False if self.max_internal_size > 0: h, w = image.shape[-2:] min_side = min(h, w) if min_side > self.max_internal_size: resize_needed = True new_h = int(h / min_side * self.max_internal_size) new_w = int(w / min_side * self.max_internal_size) image = F.interpolate(image.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] if mask is not None: if idx_mask: mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(), size=(new_h, new_w), mode='nearest', align_corners=False)[0, 0].round().long() else: mask = F.interpolate(mask.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] self.curr_ti += 1
log = logging.getLogger() class InferenceCore: def __init__(self, network: CUTIE, cfg: DictConfig, *, image_feature_store: ImageFeatureStore = None): self.network = network self.cfg = cfg self.mem_every = cfg.mem_every stagger_updates = cfg.stagger_updates self.chunk_size = cfg.chunk_size self.save_aux = cfg.save_aux self.max_internal_size = cfg.max_internal_size self.flip_aug = cfg.flip_aug self.curr_ti = -1 self.last_mem_ti = 0 # at which time indices should we update the sensory memory if stagger_updates >= self.mem_every: self.stagger_ti = set(range(1, self.mem_every + 1)) else: self.stagger_ti = set( np.round(np.linspace(1, self.mem_every, stagger_updates)).astype(int)) self.object_manager = ObjectManager() self.memory = MemoryManager(cfg=cfg, object_manager=self.object_manager) if image_feature_store is None: self.image_feature_store = ImageFeatureStore(self.network) else: self.image_feature_store = image_feature_store self.last_mask = None def clear_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory = MemoryManager(cfg=self.cfg, object_manager=self.object_manager) def clear_non_permanent_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory.clear_non_permanent_memory() def clear_sensory_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory.clear_sensory_memory() def update_config(self, cfg): self.mem_every = cfg['mem_every'] self.memory.update_config(cfg) def _add_memory(self, image: torch.Tensor, pix_feat: torch.Tensor, prob: torch.Tensor, key: torch.Tensor, shrinkage: torch.Tensor, selection: torch.Tensor, *, is_deep_update: bool = True, force_permanent: bool = False) -> None: """ Memorize the given segmentation in all memory stores. The batch dimension is 1 if flip augmentation is not used. image: RGB image, (1/2)*3*H*W pix_feat: from the key encoder, (1/2)*_*H*W prob: (1/2)*num_objects*H*W, in [0, 1] key/shrinkage/selection: for anisotropic l2, (1/2)*_*H*W selection can be None if not using long-term memory is_deep_update: whether to use deep update (e.g. with the mask encoder) force_permanent: whether to force the memory to be permanent """ if prob.shape[1] == 0: # nothing to add log.warn('Trying to add an empty object mask to memory!') return if force_permanent: as_permanent = 'all' else: as_permanent = 'first' self.memory.initialize_sensory_if_needed(key, self.object_manager.all_obj_ids) msk_value, sensory, obj_value, self.obj_logits = self.network.encode_mask( image, pix_feat, self.memory.get_sensory(self.object_manager.all_obj_ids), prob, deep_update=is_deep_update, chunk_size=self.chunk_size, need_weights=self.save_aux) self.memory.add_memory(key, shrinkage, msk_value, obj_value, self.object_manager.all_obj_ids, selection=selection, as_permanent=as_permanent) self.last_mem_ti = self.curr_ti if is_deep_update: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) def _segment(self, key: torch.Tensor, selection: torch.Tensor, pix_feat: torch.Tensor, ms_features: Iterable[torch.Tensor], update_sensory: bool = True) -> torch.Tensor: """ Produce a segmentation using the given features and the memory The batch dimension is 1 if flip augmentation is not used. 
key/selection: for anisotropic l2: (1/2) * _ * H * W pix_feat: from the key encoder, (1/2) * _ * H * W ms_features: an iterable of multiscale features from the encoder, each is (1/2)*_*H*W with strides 16, 8, and 4 respectively update_sensory: whether to update the sensory memory Returns: (num_objects+1)*H*W normalized probability; the first channel is the background """ bs = key.shape[0] if self.flip_aug: assert bs == 2 else: assert bs == 1 if not self.memory.engaged: log.warn('Trying to segment without any memory!') return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16), device=key.device, dtype=key.dtype) memory_readout = self.memory.read(pix_feat, key, selection, self.last_mask, self.network) memory_readout = self.object_manager.realize_dict(memory_readout) sensory, _, pred_prob_with_bg = self.network.segment(ms_features, memory_readout, self.memory.get_sensory( self.object_manager.all_obj_ids), chunk_size=self.chunk_size, update_sensory=update_sensory) # remove batch dim if self.flip_aug: # average predictions of the non-flipped and flipped version pred_prob_with_bg = (pred_prob_with_bg[0] + torch.flip(pred_prob_with_bg[1], dims=[-1])) / 2 else: pred_prob_with_bg = pred_prob_with_bg[0] if update_sensory: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) return pred_prob_with_bg def step(self, image: torch.Tensor, mask: Optional[torch.Tensor] = None, objects: Optional[List[int]] = None, *, idx_mask: bool = True, end: bool = False, delete_buffer: bool = True, force_permanent: bool = False) -> torch.Tensor: """ Take a step with a new incoming image. If there is an incoming mask with new objects, we will memorize them. If there is no incoming mask, we will segment the image using the memory. In both cases, we will update the memory and return a segmentation. image: 3*H*W mask: H*W (if idx mask) or len(objects)*H*W or None objects: list of object ids that are valid in the mask Tensor. The ids themselves do not need to be consecutive/in order, but they need to be in the same position in the list as the corresponding mask in the tensor in non-idx-mask mode. objects is ignored if the mask is None. If idx_mask is False and objects is None, we sequentially infer the object ids. idx_mask: if True, mask is expected to contain an object id at every pixel. If False, mask should have multiple channels with each channel representing one object. end: if we are at the end of the sequence, we do not need to update memory if unsure just set it to False delete_buffer: whether to delete the image feature buffer after this step force_permanent: the memory recorded this frame will be added to the permanent memory """ if objects is None and mask is not None: assert not idx_mask objects = list(range(1, mask.shape[0] + 1)) # resize input if needed -- currently only used for the GUI resize_needed = False if self.max_internal_size > 0: h, w = image.shape[-2:] min_side = min(h, w) if min_side > self.max_internal_size: resize_needed = True new_h = int(h / min_side * self.max_internal_size) new_w = int(w / min_side * self.max_internal_size) image = F.interpolate(image.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] if mask is not None: if idx_mask: mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(), size=(new_h, new_w), mode='nearest', align_corners=False)[0, 0].round().long() else: mask = F.interpolate(mask.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] self.curr_ti += 1
image, self.pad = pad_divide_by(image, 16)
4
2023-10-19 17:49:24+00:00
16k
stanford-oval/WikiChat
benchmark/scripts/prepare_for_scale.py
[ { "identifier": "DialogueTurn", "path": "pipelines/dialog_turn.py", "snippet": "class DialogueTurn:\n def __init__(\n self,\n agent_utterance: str = None,\n user_utterance: str = None,\n pipeline: str = None,\n engine: str = None,\n generate_engine: str = None,\n draft_engine: str = None,\n ):\n self.engine = engine\n self.generate_engine = generate_engine\n self.draft_engine = draft_engine\n self.pipeline = pipeline\n self.wall_time_seconds = (\n 0 # how much time it took to generate this turn, in seconds\n )\n self.agent_utterance = agent_utterance\n self.user_utterance = user_utterance\n\n # retrieve_and_generate pipeline\n self.initial_search_query = None\n self.initial_search_query_time = None\n self.initial_search_results = []\n self.initial_search_result_titles = []\n self.initial_search_bullets = []\n\n # generate_and_correct pipeline\n self.llm_utterance = None\n self.claims = []\n self.verification_retrieval_results = {}\n self.verification_result = {}\n\n # early_combine pipeline\n self.combined_evidences = []\n self.combined_utterance = None\n self.feedback = []\n self.feedback_scores = []\n self.refined_utterance = None\n\n def _summarize_vc_log(self):\n verification_summary = {}\n assert len(self.verification_result) == len(\n self.verification_retrieval_results\n ), \"We need to have retrieved evidence for all claims\"\n for key, value in self.verification_retrieval_results.items():\n claim_idx = int(key)\n v_ret_results = []\n for v in value:\n title, paragraph, score = tuple(v)\n v_ret_results.append(\n {\"title\": title, \"paragraph\": paragraph, \"score\": round(score, 1)}\n )\n verification_summary[self.claims[claim_idx][0]] = OrderedDict(\n {\n \"label\": self.verification_result[claim_idx][\"label\"],\n \"fixed_claim\": self.verification_result[claim_idx][\"fixed_claim\"],\n \"retrieval_results\": v_ret_results,\n }\n )\n return verification_summary\n\n def _summarize_rg_log(self):\n rg_summary = {\n \"initial_search_query\": self.initial_search_query,\n \"initial_search_query_time\": self.initial_search_query_time,\n \"initial_search_bullets\": self.initial_search_bullets,\n \"initial_search_results\": [],\n }\n\n for i in range(len(self.initial_search_results)):\n rg_summary[\"initial_search_results\"].append(\n {\n \"title\": self.initial_search_result_titles[i],\n \"paragraph\": self.initial_search_results[i],\n # 'bullets': self.initial_search_bullets,\n }\n )\n\n return rg_summary\n\n def log(self):\n \"\"\"\n Returns a json object that contains all information inside `self`\n \"\"\"\n # combine fields into a more human-readable field\n verification_summary = self._summarize_vc_log()\n rg_summary = self._summarize_rg_log()\n\n return OrderedDict(\n {\n # retrieve_and_generate pipeline\n \"retrieve_and_generate\": rg_summary,\n # generate_and_correct pipeline\n \"llm_utterance\": self.llm_utterance,\n \"generate_and_correct\": verification_summary,\n # early_combine pipeline\n \"combined_evidences\": self.combined_evidences,\n \"combined_utterance\": self.combined_utterance,\n \"feedback\": self.feedback,\n \"feedback_scores\": self.feedback_scores,\n \"refined_utterance\": self.refined_utterance,\n \"user_utterance\": self.user_utterance,\n \"agent_utterance\": self.agent_utterance,\n \"engine\": self.engine,\n \"generate_engine\": self.generate_engine,\n \"draft_engine\": self.draft_engine,\n \"pipeline\": self.pipeline,\n \"wall_time_seconds\": round(self.wall_time_seconds, 1),\n }\n )\n\n @staticmethod\n def utterance_list_to_dialog_history(utterance_list: 
List[str]):\n \"\"\"\n The resulting dialog history will not have all the fields correctly initialized, since no information about e.g. search queries is available\n \"\"\"\n dialog_history = []\n assert (\n len(utterance_list) % 2 == 1\n ), \"The first turn is always the user, and the turn to be generated is always the agent, so the number of turns should be odd\"\n for i in range(0, len(utterance_list) - 2, 2):\n dialog_history.append(\n DialogueTurn(\n user_utterance=utterance_list[i],\n agent_utterance=utterance_list[i + 1],\n )\n )\n user_utterance = utterance_list[-1]\n\n return dialog_history, user_utterance\n\n @staticmethod\n def dialog_history_to_utterance_list(dialog_history) -> List[str]:\n \"\"\"\n Convert a list of DialogueTurns to a list of strings\n \"\"\"\n utterance_list = []\n for turn in dialog_history:\n utterance_list.append(turn.user_utterance)\n utterance_list.append(turn.agent_utterance)\n return utterance_list" }, { "identifier": "Chatbot", "path": "pipelines/chatbot.py", "snippet": "class Chatbot:\n \"\"\"\n A stateless chatbot. Stateless means that it does not store the history of the dialog in itself, but requires it as an input\n \"\"\"\n\n def __init__(self, args) -> None:\n # Initialize everything, because we can change the pipeline on the fly using system_parameters\n self.claim_splitter = ClaimSplitter(args.claim_prompt_template_file)\n self.evi_num = args.evi_num\n self.colbert_endpoint = args.colbert_endpoint\n self.retrieval_num = args.retrieval_num\n self.refiner = Refiner(prompt=args.refinement_prompt, args=args)\n\n self.temperature = args.temperature\n self.max_tokens = args.max_tokens\n self.top_p = args.top_p\n self.presence_penalty = args.presence_penalty\n self.frequency_penalty = args.frequency_penalty\n self.skip_verification = args.skip_verification\n\n # default parameters, can be overridden:\n self.engine = args.engine\n self.generate_engine = args.generate_engine\n self.draft_engine = args.draft_engine\n self.do_refine=args.do_refine\n self.fuse_claim_splitting = args.fuse_claim_splitting\n\n def generate_next_turn(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n pipeline: str,\n system_parameters: dict = {},\n ):\n \"\"\"\n Generate the next turn of the dialog\n system_parameters: can override some of the default parameters defined in __init__()\n \"\"\"\n # throw error if system_parameters contains keys that are not supported\n for key in system_parameters:\n assert key in [\n \"engine\",\n \"generate_engine\",\n \"draft_engine\",\n \"fuse_claim_splitting\",\n \"do_refine\",\n ], f\"Unsupported system_parameter key: {key}\"\n\n engine = system_parameters.get(\"engine\", self.engine)\n generate_engine = system_parameters.get(\"generate_engine\", self.generate_engine)\n if generate_engine is None:\n # this means that the default `generate_engine` was not provided via commandline, and system_parameters is not override it either.\n # So default to `engine`\n generate_engine = engine\n draft_engine = system_parameters.get(\"draft_engine\", self.draft_engine)\n if draft_engine is None:\n draft_engine = engine\n fuse_claim_splitting = system_parameters.get(\"fuse_claim_splitting\", self.fuse_claim_splitting)\n engine_dict = {\"default\": engine, \"generate\": generate_engine, \"draft\": draft_engine}\n do_refine = system_parameters.get(\"do_refine\", self.do_refine)\n\n start_time = time.time()\n\n if pipeline == \"generate_and_correct\":\n new_dlg_turn = self.generate_and_correct_pipeline(\n object_dlg_history,\n 
new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n )\n elif pipeline == \"retrieve_and_generate\":\n new_dlg_turn = self.retrieve_and_generate_pipeline(\n object_dlg_history,\n new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n )\n elif pipeline == \"generate\":\n reply = self._generate_only(\n \"baseline_chatbot.prompt\",\n object_dlg_history,\n new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n )\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n new_dlg_turn.llm_utterance = reply\n new_dlg_turn.agent_utterance = reply\n elif pipeline == \"retrieve_only\":\n new_dlg_turn = self.retrieve_only_pipeline(\n object_dlg_history,\n new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n )\n elif pipeline == \"early_combine\":\n new_dlg_turn = self.early_combine_pipeline(\n object_dlg_history,\n new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n fuse_claim_splitting=fuse_claim_splitting\n )\n else:\n raise ValueError\n\n if do_refine == \"True\" or do_refine == \"true\" or do_refine == True:\n do_refine = True\n else:\n do_refine = False\n\n if do_refine:\n prerefinement_agent_utterance = new_dlg_turn.agent_utterance\n new_dlg_turn.agent_utterance = self.refiner.set_refinement_fields(\n object_dlg_history, new_dlg_turn, engine_dict=engine_dict\n )\n if new_dlg_turn.agent_utterance == prerefinement_agent_utterance:\n logger.info(\"Refinement did NOT change the agent utterance\")\n\n new_dlg_turn.engine = engine\n new_dlg_turn.generate_engine = generate_engine\n new_dlg_turn.draft_engine = draft_engine\n new_dlg_turn.pipeline = pipeline\n\n end_time = time.time()\n new_dlg_turn.wall_time_seconds = end_time - start_time\n\n return new_dlg_turn\n\n def retrieve_only_pipeline(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: dict,\n ):\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n # search based on the history of the dialog so far\n search_prompt_output = llm_generate(\n template_file=\"query.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"new_user_utterance\": new_user_utterance,\n \"force_search\": True,\n },\n engine=engine_dict[\"default\"],\n max_tokens=50,\n temperature=0.0,\n top_p=0.5,\n stop_tokens=[\"\\n\"],\n postprocess=False,\n )\n search_prompt_output = (\n \"Yes. 
\" + search_prompt_output\n ) # because we are forcing a search\n self._handle_search_prompt_output(\n search_prompt_output=search_prompt_output,\n new_dlg_turn=new_dlg_turn,\n num_paragraphs=1,\n summarize_results=False,\n engine_dict=engine_dict,\n )\n\n paragraph = new_dlg_turn.initial_search_results[\n 0\n ] # we only retrieve one paragraph\n title = new_dlg_turn.initial_search_result_titles[0]\n new_dlg_turn.agent_utterance = (\n 'I found an article titled \"' + title + '\": ' + paragraph\n )\n return new_dlg_turn\n\n def retrieve_and_generate_pipeline(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: dict,\n ):\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n reply = self._retrieve_and_generate(\n object_dlg_history,\n new_user_utterance,\n new_dlg_turn,\n engine_dict=engine_dict,\n )\n new_dlg_turn.agent_utterance = reply\n\n return new_dlg_turn\n\n def generate_and_correct_pipeline(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: str,\n ):\n \"\"\"\n Verify and correct the last turn of a given dialog using retrieved evidences\n Args:\n - `object_dlg_history` (list): previous dialog turns\n - `new_user_utterance` (str): last user utterance\n Returns:\n - `corrected_reply` (str): corrected LLM response\n - `new_dialog_turn` (DialogTurn)\n \"\"\"\n original_reply = self._generate_only(\n \"generate.prompt\",\n object_dlg_history,\n new_user_utterance,\n engine_dict=engine_dict,\n )\n\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n new_dlg_turn.llm_utterance = original_reply\n\n new_dlg_turn.agent_utterance = self._generate_and_correct_reply(\n object_dlg_history,\n new_user_utterance,\n original_reply,\n new_dlg_turn,\n engine_dict=engine_dict,\n )\n\n return new_dlg_turn\n\n def early_combine_pipeline(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: str,\n fuse_claim_splitting: bool\n ):\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n\n # gather evidence from two routs in parallel\n with ThreadPoolExecutor(2) as executor:\n search_summary = executor.submit(\n self._search_and_summarize,\n object_dlg_history,\n new_user_utterance,\n new_dlg_turn,\n engine_dict=engine_dict,\n )\n supported_claims = executor.submit(\n self._generate_split_and_fact_check,\n object_dlg_history,\n new_user_utterance,\n new_dlg_turn,\n engine_dict=engine_dict,\n fuse_claim_splitting=fuse_claim_splitting\n )\n search_summary = search_summary.result()\n supported_claims = supported_claims.result()\n\n combined_evi = search_summary + supported_claims\n # logger.info('Combined evidences: %s', new_dlg_turn.combined_evidences)\n new_dlg_turn.combined_evidences = combined_evi\n\n if not combined_evi:\n logger.info(\"Combined evidence is empty\")\n # if new_dlg_turn.initial_search_query is None:\n # new_dlg_turn.combined_utterance = original_reply # no search needed, so return the original chitchat response\n # else:\n # new_dlg_turn.combined_utterance = \"Sorry, I'm not sure.\" # will become more conversational after refinement\n # else:\n new_dlg_turn.combined_utterance = self._reply_using_combined_evidence(\n object_dlg_history,\n new_user_utterance,\n combined_evi,\n engine_dict=engine_dict,\n )\n new_dlg_turn.agent_utterance = new_dlg_turn.combined_utterance\n\n return new_dlg_turn\n\n def _handle_search_prompt_output(\n self,\n search_prompt_output: str,\n new_dlg_turn: DialogueTurn,\n num_paragraphs,\n summarize_results: 
bool,\n engine_dict: dict,\n ):\n \"\"\"\n Updates `new_dlg_turn` with logs\n A sample output is: Yes. You Google \"James E. Webb the administrator of NASA\". The year of the results is \"none\".]\n \"\"\"\n reranking_factor = 3 # we will retrieve num_paragraphs * reranking_factor paragraphs before reranking them\n\n search_prompt_output = search_prompt_output.strip()\n search_pattern = (\n r'Yes\\. You.*\"([^\"]*)\".* The year of the results is \"([^=]*)\"\\.]?'\n )\n search_match = re.match(search_pattern, search_prompt_output)\n\n if search_prompt_output.startswith(\"No\"):\n # sometimes LLM outputs No. with extra explanation afterwards instead of ']', or \"No search needed\". So this more lax condition leads to fewer Exceptions\n logger.info(\"No search needed.\")\n elif search_match:\n search_query = search_match.group(1)\n search_query_time = search_match.group(2)\n y = extract_year(title=\"\", passage=search_query)\n if len(y) > 0:\n logger.info(\"Overriding query year\")\n search_query_time = y[0]\n logger.info(\"search_query = %s\", search_query)\n logger.info(\"search_query_time = %s\", search_query_time)\n\n # retrieve more paragraphs so that we can do date-based reranking (if needed) and skip \"None\" summaries (if any)\n paragraphs, scores, titles = self._colbert_retrieve(\n query=search_query,\n num_paragraphs=num_paragraphs * reranking_factor,\n rerank=search_query_time,\n )\n\n logger.info(\"Colbert titles: %s\", str(titles))\n\n if summarize_results:\n bullets = []\n not_none_paragraphs = []\n not_none_titles = []\n # summarize in batches, until we reach `num_paragraphs` paragraphs that are deemed relevant\n for start_idx in range(\n 0, num_paragraphs * reranking_factor, num_paragraphs\n ):\n b, not_none_paragraph_indices = self._summarize_results(\n search_query,\n paragraphs[start_idx : start_idx + num_paragraphs],\n titles[start_idx : start_idx + num_paragraphs],\n maximum_paragraphs_needed=num_paragraphs\n - len(not_none_paragraphs),\n engine_dict=engine_dict,\n )\n # print(\"not_none_paragraph_indices = \", not_none_paragraph_indices)\n not_none_paragraphs += [\n paragraphs[start_idx + i] for i in not_none_paragraph_indices\n ]\n not_none_titles += [\n titles[start_idx + i] for i in not_none_paragraph_indices\n ]\n bullets = bullets + b\n assert len(not_none_paragraphs) <= num_paragraphs\n if len(not_none_paragraphs) == num_paragraphs:\n break\n titles = not_none_titles\n paragraphs = not_none_paragraphs\n\n else:\n paragraphs = paragraphs[:num_paragraphs]\n titles = titles[:num_paragraphs]\n bullets = None\n\n # log everything\n new_dlg_turn.initial_search_query = search_query\n new_dlg_turn.initial_search_query_time = search_query_time\n new_dlg_turn.initial_search_results = paragraphs\n new_dlg_turn.initial_search_result_titles = titles\n new_dlg_turn.initial_search_bullets = bullets\n else:\n raise ValueError(\n \"Search prompt's output is invalid: %s\" % search_prompt_output\n )\n # logger.error('Search prompt\\'s output is invalid: %s' % search_prompt_output)\n\n def _summarize_results(\n self,\n search_query,\n paragraphs,\n titles,\n maximum_paragraphs_needed,\n engine_dict,\n ):\n \"\"\"\n Summarizes `paragraphs` and returns the indices of at most `maximum_paragraphs_needed` paragraphs that are deemed relevant to the `query`\n \"\"\"\n summaries = llm_generate(\n template_file=\"summarize_and_filter.prompt\",\n prompt_parameter_values=[\n {\"title\": t, \"article\": p, \"query\": search_query}\n for (t, p) in zip(titles, paragraphs)\n ],\n 
engine=engine_dict[\"default\"],\n max_tokens=200,\n temperature=0.0,\n top_p=0.5,\n stop_tokens=None,\n postprocess=False,\n )\n bullets = []\n not_none_paragraph_indices = []\n for paragraph_idx, s in enumerate(summaries):\n if s.startswith(\"Yes. \"):\n # necessary for distilled models\n s = s[5:]\n if s.startswith(\"None\") or s == \"- None\" or s == \"-None\":\n # skip the None paragraphs\n logger.info(\n \"This retrieved paragraphs was deemed unrelated: %s\",\n paragraphs[paragraph_idx],\n )\n continue\n not_none_paragraph_indices.append(paragraph_idx)\n for b in s.split(\"\\n-\"):\n b = b.strip()\n if len(b) == 0:\n continue\n if not b.endswith(\".\"):\n # most likely a partial generation that was cut off because of max_tokens\n continue\n bullets.append(b.strip(\"- \"))\n if len(not_none_paragraph_indices) == maximum_paragraphs_needed:\n break\n\n return bullets, not_none_paragraph_indices\n\n def _retrieve_and_generate(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n new_dlg_turn: DialogueTurn,\n engine_dict: dict,\n ) -> str:\n \"\"\"\n Retrieves related documents and generates a reply base on them, given the dialog history\n Updates `new_dlg_turn` with logs\n Returns reply\n \"\"\"\n self._search_and_summarize(\n object_dlg_history, new_user_utterance, new_dlg_turn, engine_dict\n )\n\n reply = llm_generate(\n template_file=\"draft.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"last_user_utterance\": new_user_utterance,\n \"evidences\": new_dlg_turn.initial_search_bullets,\n },\n engine=engine_dict[\"default\"],\n max_tokens=self.max_tokens,\n temperature=self.temperature,\n top_p=self.top_p,\n presence_penalty=self.presence_penalty,\n stop_tokens=[\"\\n\"],\n postprocess=True,\n )\n return reply\n\n def _search_and_summarize(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n new_dlg_turn: DialogueTurn,\n engine_dict: dict,\n ):\n search_prompt_output = llm_generate(\n template_file=\"query.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"new_user_utterance\": new_user_utterance,\n \"force_search\": False,\n },\n engine=engine_dict[\"default\"],\n max_tokens=50,\n temperature=0.0,\n top_p=0.5,\n stop_tokens=[\"\\n\"],\n postprocess=False,\n )\n self._handle_search_prompt_output(\n search_prompt_output=search_prompt_output,\n new_dlg_turn=new_dlg_turn,\n num_paragraphs=self.retrieval_num,\n summarize_results=True,\n engine_dict=engine_dict,\n )\n return new_dlg_turn.initial_search_bullets\n\n def _generate_split_and_fact_check(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n new_dlg_turn: DialogueTurn,\n engine_dict: dict,\n fuse_claim_splitting: bool\n ):\n original_reply = self._generate_only(\n \"generate.prompt\",\n object_dlg_history,\n new_user_utterance,\n engine_dict=engine_dict,\n )\n if not fuse_claim_splitting:\n new_dlg_turn.llm_utterance = original_reply\n claims_output = None\n else:\n new_dlg_turn.llm_utterance = None\n claims_output = original_reply\n\n\n claims = self.claim_splitter.split_claim(\n dialog_history=object_dlg_history,\n new_user_utterance=new_user_utterance,\n current_agent_utterance=original_reply,\n engine_dict=engine_dict,\n claims_output=claims_output\n )\n\n new_dlg_turn.claims = claims\n if not claims:\n logger.info(\"No claims to check\")\n return []\n\n # retrieve evidence\n ret_output = self._retrieve_evidences(claims)\n\n # verify claims\n ver_output = self._verify_claims(\n claims,\n ret_output,\n 
object_dlg_history,\n new_user_utterance,\n original_reply,\n do_correct=False,\n engine_dict=engine_dict,\n )\n\n new_dlg_turn.verification_retrieval_results = ret_output\n new_dlg_turn.verification_result = ver_output\n\n # only keep supported claim\n supported_claims = []\n for label_fix in ver_output:\n verification_label, fixed_claim = (\n label_fix[\"label\"],\n label_fix[\"fixed_claim\"],\n )\n if verification_label == \"SUPPORTS\":\n supported_claims.append(fixed_claim)\n return supported_claims\n\n def _generate_and_correct_reply(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n original_reply: str,\n new_dlg_turn: DialogueTurn,\n engine_dict: dict,\n ) -> str:\n \"\"\"\n Verifies and corrects `original_reply` given the dialog history\n Updates `new_dlg_turn` with logs\n Returns corrected reply\n \"\"\"\n # split claims\n # the returned \"claims\" is a list of tuples (claim, year)\n claims = self.claim_splitter.split_claim(\n dialog_history=object_dlg_history,\n new_user_utterance=new_user_utterance,\n current_agent_utterance=original_reply,\n engine_dict=engine_dict,\n )\n claims = ClaimSplitter.remove_claims_from_previous_turns(claims, object_dlg_history)\n if not claims:\n logger.info(\"No claims to check\")\n return original_reply\n new_dlg_turn.claims = claims\n\n # retrieve evidence\n ret_output = self._retrieve_evidences(claims)\n\n # TODO: use the ret_output together with initial search outputs for verification\n # verify claims\n ver_output = self._verify_claims(\n claims,\n ret_output,\n object_dlg_history,\n new_user_utterance,\n original_reply,\n do_correct=True,\n engine_dict=engine_dict,\n )\n\n # update dialog turn\n new_dlg_turn.verification_retrieval_results = ret_output\n new_dlg_turn.verification_result = ver_output\n if is_everything_verified(ver_output):\n logger.info(\"All claims passed verification, nothing to correct\")\n return original_reply\n\n # correction\n corrected_reply = original_reply\n fixed_claims = []\n for label_fix in ver_output:\n verification_label, fixed_claim = (\n label_fix[\"label\"],\n label_fix[\"fixed_claim\"],\n )\n if (\n verification_label == \"SUPPORTS\"\n ): # if the claim is already correct, no need to fix\n continue\n fixed_claims.append(fixed_claim)\n assert len(fixed_claims) > 0\n corrected_reply = self._correct(\n original_reply,\n object_dlg_history,\n new_user_utterance,\n fixed_claims, # corrected claim for REFUTE and \"I'm not sure\" for NOT ENOUGH INFO claims.\n engine_dict=engine_dict,\n )\n\n return corrected_reply\n\n def _generate_only(\n self,\n generation_prompt: str,\n dialog_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: dict,\n ) -> str:\n \"\"\"\n Generate baseline LLM response\n Args:\n - `generation_prompt` (str): the .prompt file to use for this stage\n - `dialog_history` (list): previous turns\n Returns:\n - `reply`(str): original LLM response\n \"\"\"\n reply = llm_generate(\n template_file=generation_prompt,\n prompt_parameter_values={\n \"dlg\": dialog_history,\n \"new_user_utterance\": new_user_utterance,\n \"engine_name\": engine_dict[\"generate\"] # used to enforce model knowledge cut-off date for models other than GPT-4\n },\n engine=engine_dict[\"generate\"],\n max_tokens=self.max_tokens,\n temperature=self.temperature,\n stop_tokens=[\"\\n\"],\n top_p=self.top_p,\n frequency_penalty=self.frequency_penalty,\n presence_penalty=self.presence_penalty,\n postprocess=True,\n )\n\n return reply\n\n def _correct(\n self,\n original_reply,\n 
object_dlg_history,\n last_user_utterance,\n fixed_claims,\n engine_dict: dict,\n ):\n \"\"\"\n Given context + original response + evidence for a claim, fix the original response\n\n Args:\n - `original_reply`(str): LLM's original response\n - `object_dlg_history`(list): list of previous DialogueTurns\n - `last_user_utterance` (str): last user utterance\n - `fixed_claims` (list): list of fixed claims\n Returns:\n - `corrected_reply`(str): corrected LLM response\n \"\"\"\n # correction prompt's context should be in one line\n correction_reply = llm_generate(\n template_file=\"correction_combiner.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"last_user_utterance\": last_user_utterance,\n \"original_reply\": original_reply,\n \"fixed_claims\": fixed_claims,\n },\n engine=engine_dict[\"default\"],\n max_tokens=self.max_tokens,\n temperature=0,\n stop_tokens=[\"\\n\"],\n top_p=self.top_p,\n frequency_penalty=self.frequency_penalty,\n presence_penalty=self.presence_penalty,\n postprocess=True,\n )\n\n return correction_reply\n\n def _reply_using_combined_evidence(\n self,\n object_dlg_history,\n last_user_utterance,\n evidences,\n engine_dict: dict,\n ):\n combined_reply = llm_generate(\n template_file=\"draft.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"last_user_utterance\": last_user_utterance,\n \"evidences\": evidences,\n },\n engine=engine_dict[\"draft\"],\n max_tokens=self.max_tokens,\n temperature=0,\n stop_tokens=None,\n top_p=self.top_p,\n frequency_penalty=self.frequency_penalty,\n presence_penalty=self.presence_penalty,\n postprocess=True,\n )\n\n return combined_reply\n\n def _colbert_retrieve(\n self,\n query: str,\n num_paragraphs: int,\n rerank=\"none\",\n top_p=1,\n ):\n \"\"\"\n Args:\n `num_paragraphs`: number of paragraphs that will be output\n `rerank` (str): one of 'none', 'recent' or a year like '2005'. 'none' disables reranking. 'recent' retrieves more and returns the most recent ones.\n '2005' boosts the ranking of results that match 2005. 
The date of a result is determined by the year numbers it contains.\n `top_p` (float): chooses from the smallest possible set of results whose cumulative probability exceeds top_p\n Returns:\n `passages` (list): a list of passage texts (excluding the title) with the highest similarities to the `query`\n `passage_scores` (list): a list of similarity scores of each passage in `passsages` with `query`\n `passage_titles` (list): a list of passage titles\n \"\"\"\n\n # print(self.colbert_endpoint, {'query': query, 'evi_num': num_paragraphs})\n response = requests.get(\n self.colbert_endpoint,\n json={\"query\": query, \"evi_num\": num_paragraphs},\n )\n if response.status_code != 200:\n raise Exception(\"ColBERT Search API Error: %s\" % str(response))\n results = response.json()\n passages = []\n passage_titles = []\n for r in results[\"passages\"]:\n r = r.split(\"|\", maxsplit=1)\n passage_titles.append(r[0].strip())\n passages.append(r[1].strip())\n scores = results[\"passage_scores\"]\n probs = results[\"passage_probs\"]\n # print(\"probs = \", probs)\n top_p_cut_off = np.cumsum(probs) > top_p\n if not np.any(top_p_cut_off):\n # even if we include everything, we don't get to top_p\n top_p_cut_off = len(scores)\n else:\n top_p_cut_off = np.argmax(top_p_cut_off) + 1\n # print(\"top_p_cut_off = \", top_p_cut_off)\n passages, scores, passage_titles = (\n passages[:top_p_cut_off],\n scores[:top_p_cut_off],\n passage_titles[:top_p_cut_off],\n )\n\n if rerank == \"none\":\n pass\n else:\n all_passage_dates = []\n for t, p in zip(passage_titles, passages):\n passage_years = extract_year(title=t, passage=p)\n all_passage_dates.append(passage_years)\n if rerank == \"recent\":\n sort_fn = lambda x: max(\n x[3] if len(x[3]) > 0 else [0]\n ) # sort based on the latest year mentioned in the paragraph, demoting paragraphs that don't mention a year\n else:\n # rerank is a year\n try:\n query_year = int(rerank)\n except ValueError as e:\n # raise ValueError('rerank should be none, recent or an integer.')\n logger.error(e)\n return (\n passages[:num_paragraphs],\n scores[:num_paragraphs],\n passage_titles[:num_paragraphs],\n )\n sort_fn = lambda x: x[3].count(\n query_year\n ) # boost the passages that have a matching year with the query, the more they mention the date the more we boost\n\n # logger.info('Search result dates before date-based reranking: %s', str(all_passage_dates))\n passages, scores, passage_titles, all_passage_dates = list(\n zip(\n *sorted(\n zip(passages, scores, passage_titles, all_passage_dates),\n reverse=True,\n key=sort_fn,\n )\n )\n )\n # logger.info('Search result dates after date-based reranking: %s', str(all_passage_dates))\n\n # choose top num_paragraphs paragraphs\n passages, scores, passage_titles = (\n passages[:num_paragraphs],\n scores[:num_paragraphs],\n passage_titles[:num_paragraphs],\n )\n\n return passages, scores, passage_titles\n\n def _retrieve_evidences(self, claims, top_p: float = 1):\n \"\"\"\n Retrieve evidences\n Args:\n - `claims` (list): list of (claim, year)\n - `top_p` (float): chooses from the smallest possible set of results whose cumulative probability exceeds top_p\n Returns:\n - `ret_output` (dict): a dict from claim_id to a list of `evidence`\n - each `evidence` is a list of length 5: [`title of wikipedia page`, `wikipedia text`, `similarity_score`]\n \"\"\"\n ret_output = dict()\n for id, (cl, year) in enumerate(claims):\n # if self.args.reranking_method == \"none\":\n # No re-ranking on evidence. 
Reranking to match the dates increases the risk of confirmation bias.\n passages, passage_scores, passage_titles = self._colbert_retrieve(\n query=cl, num_paragraphs=self.evi_num, top_p=top_p, rerank=\"none\"\n )\n # else:\n # # retrieve more so that we can match the dates\n # passages, passage_scores, passage_titles = self._colbert_retrieve(\n # query=cl,\n # num_paragraphs=self.evi_num,\n # rerank=year,\n # num_paragraphs_for_reranking=self.evi_num * 3,\n # top_p=top_p,\n # )\n evidences = []\n for passage, score, title in zip(passages, passage_scores, passage_titles):\n evidences.append([title, passage, score])\n ret_output[id] = evidences\n\n return ret_output\n\n def _verify_claims(\n self,\n claims,\n ret_output,\n object_dlg_history,\n new_user_utterance,\n original_reply,\n do_correct: bool,\n engine_dict: dict,\n ):\n \"\"\"\n Verify claims using retrieval output\n Args:\n - `claims` (list): list of (claim, year) pairs splitted\n - `ret_output` (dict): a dict from claim_id to a list of `evidence`\n - each `evidence` is a list of length 5: [`title of wikipedia page`, `wikipedia text`, `similarity_score`]\n - `object_dlg_history`(str): list of previous DialogueTurns\n - `last_user_utterance` (str): last user utterance\n - `original_reply`(str): original LLM response\n Returns:\n - `ver_output` (list): a list of verification label (\"SUPPORTS\", \"REFUTES\", \"NOT ENOUGH INFO\") and the fixed claims\n \"\"\"\n ver_output = []\n parameter_values_list = []\n\n for claim_id, (cl, year) in enumerate(claims):\n evidences = ret_output[claim_id][: self.evi_num]\n parameter_values_list.append(\n {\n \"dlg\": object_dlg_history,\n \"last_user_utterance\": new_user_utterance,\n \"original_reply\": original_reply,\n \"claim\": cl,\n \"evidence_titles\": [e[0] for e in evidences],\n \"evidence_texts\": [e[1] for e in evidences],\n \"do_correct\": do_correct\n }\n )\n\n # when using gold evidence, we do not split claim so claim is the same with original reply\n if self.skip_verification:\n all_verification_responses = ['is \"SUPPORTS\"'] * len(claims)\n else:\n all_verification_responses = llm_generate(\n template_file=\"verify.prompt\",\n prompt_parameter_values=parameter_values_list,\n engine=engine_dict[\"default\"],\n max_tokens=200,\n temperature=0,\n stop_tokens=None,\n postprocess=False,\n )\n\n for (cl, year), verification_response in zip(\n claims, all_verification_responses\n ):\n # logger.info(\"claim: %s ; verification_response: %s\", cl, verification_response)\n # the following handles cases where smaller models like gpt-35-turbo do not follow the few-shot examples' format\n if (\n 'is \"supports\"' in verification_response.lower()\n or \"no fact-checking is needed for this claim\"\n in verification_response.lower()\n or \"the fact-checking result is not applicable to this response\"\n in verification_response.lower()\n ):\n verification_label = \"SUPPORTS\"\n fixed_claim = cl\n elif (\n 'the fact-checking result is \"not enough info\"'\n in verification_response.lower()\n ):\n verification_label = \"NOT ENOUGH INFO\"\n fixed_claim = \"\"\n else:\n verification_label = \"REFUTES\" # default set to be \"REFUTES\"\n fixed_claim = \"\"\n\n if do_correct and verification_label != \"SUPPORTS\":\n if \"You rewrite your claim:\" in verification_response:\n fixed_claim = verification_response.split(\n \"You rewrite your claim:\"\n )[-1].strip()\n else:\n logger.error(\n \"verification prompt did not fix a %s. 
Output: %s\"\n % (verification_label, verification_response)\n )\n\n ver_output.append({\"label\": verification_label, \"fixed_claim\": fixed_claim})\n\n return ver_output" }, { "identifier": "make_parent_directories", "path": "pipelines/utils.py", "snippet": "def make_parent_directories(file_name: str):\n \"\"\"\n Creates the parent directories of `file_name` if they don't exist\n \"\"\"\n pathlib.Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True)" }, { "identifier": "add_pipeline_arguments", "path": "pipelines/pipeline_arguments.py", "snippet": "def add_pipeline_arguments(parser):\n # determine components of the pipeline\n parser.add_argument(\n \"--pipeline\",\n type=str,\n required=True,\n choices=[\n \"generate_and_correct\",\n \"retrieve_and_generate\",\n \"generate\",\n \"retrieve_only\",\n \"early_combine\",\n \"atlas\",\n ],\n default=\"generate_and_correct\",\n help=\"The type of pipeline used to imrpove GPT-3 response. Only used to know which modules to load.\",\n )\n parser.add_argument(\n \"--claim_prompt_template_file\",\n type=str,\n default=\"split_claims.prompt\",\n help=\"The path to the file containing the claim LLM prompt.\",\n )\n parser.add_argument(\n \"--refinement_prompt\",\n default=\"refine_w_feedback.prompt\",\n help=\"What prompt to use to refine the final response.\",\n )\n parser.add_argument(\n \"--do_refine\", action=\"store_true\", help=\"Whether to refine the final response.\"\n )\n parser.add_argument(\n \"--skip_verification\",\n action=\"store_true\",\n help=\"If True, all claims will be considered correct without fact-checking. Especially useful to speed up debugging of the other parts of the pipeline.\",\n )\n\n parser.add_argument(\n \"--fuse_claim_splitting\",\n action=\"store_true\",\n help=\"If True, The first claim splitting stage of early_combine pipeline will be fused with the generate stage. Only useful for distilled models that have been trained to do this.\",\n )\n\n parser.add_argument(\n \"--colbert_endpoint\",\n type=str,\n default=\"http://127.0.0.1:5000/search\",\n help=\"whether using colbert for retrieval.\",\n )\n parser.add_argument(\n \"--engine\",\n type=str,\n required=True,\n choices=[\"atlas\"]\n + local_model_list\n + together_model_list\n + openai_chat_model_list\n + openai_nonchat_model_list,\n help=\"The LLM engine to use.\",\n ) # choices are from the smallest to the largest model\n\n parser.add_argument(\n \"--generate_engine\",\n type=str,\n default=None,\n choices=[\"atlas\"]\n + local_model_list\n + together_model_list\n + openai_chat_model_list\n + openai_nonchat_model_list,\n help=\"The LLM engine to use for the 'generate' stage of pipelines. If provided, overrides --engine for that stage.\",\n ) # choices are from the smallest to the largest model\n\n parser.add_argument(\n \"--draft_engine\",\n type=str,\n default=None,\n choices=[\"atlas\"]\n + local_model_list\n + together_model_list\n + openai_chat_model_list\n + openai_nonchat_model_list,\n help=\"The LLM engine to use for the 'draft' stage of pipelines. 
If provided, overrides --engine for that stage.\",\n ) # choices are from the smallest to the largest model\n\n parser.add_argument(\n \"--reranking_method\",\n type=str,\n choices=[\"none\", \"date\"],\n default=\"none\",\n help=\"Only used for retrieve_and_generate pipeline\",\n )\n\n # LLM generation hyperparameters\n parser.add_argument(\n \"--max_tokens\",\n type=int,\n default=250,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n parser.add_argument(\n \"--temperature\",\n type=float,\n default=0.8,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n parser.add_argument(\n \"--top_p\",\n type=float,\n default=0.9,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n parser.add_argument(\n \"--frequency_penalty\",\n type=float,\n default=0.0,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n parser.add_argument(\n \"--presence_penalty\",\n type=float,\n default=0.0,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n\n parser.add_argument(\n \"--evi_num\",\n type=int,\n default=2,\n help=\"Number of evidences to retrieve per claim.\",\n )\n\n parser.add_argument(\n \"--retrieval_num\",\n type=int,\n default=3,\n help=\"Number of passages to retrieve when searching for information.\",\n )" }, { "identifier": "check_pipeline_arguments", "path": "pipelines/pipeline_arguments.py", "snippet": "def check_pipeline_arguments(args):\n # make sure for ATLAS, both engine and pipeline are set to 'atlas'\n if hasattr(args, \"pipeline\"):\n if (args.engine == \"atlas\" and args.pipeline != \"atlas\") or (\n args.engine != \"atlas\" and args.pipeline == \"atlas\"\n ):\n raise ValueError(\n \"When using ATLAS, both `engine` and `pipeline` input arguments should be set to 'atlas'.\"\n )" }, { "identifier": "_fill_template", "path": "llm/load_prompt.py", "snippet": "def _fill_template(template_file, prompt_parameter_values, get_rendered_blocks=False):\n # logger.info(\"Filling template %s\", template_file)\n template = jinja_environment.get_template(template_file)\n\n prompt_parameter_values[\"instruction_start\"] = system_start\n prompt_parameter_values[\"instruction_end\"] = system_end\n prompt_parameter_values[\"input_start\"] = user_start\n prompt_parameter_values[\"input_end\"] = user_end\n prompt_parameter_values[\"output_start\"] = assistant_start\n prompt_parameter_values[\"output_end\"] = assistant_end\n\n # always make these useful constants available in a template\n # make a new function call each time since the date might change during a long-term server deployment\n today = datetime.now(pytz.timezone(\"US/Pacific\")).date()\n prompt_parameter_values[\"today\"] = today.strftime(\"%B %d, %Y\") # May 30, 2023\n prompt_parameter_values[\"current_year\"] = today.year\n prompt_parameter_values[\"location\"] = \"the U.S.\"\n prompt_parameter_values[\"chatbot_name\"] = \"WikiChat\"\n\n filled_prompt = template.render(**prompt_parameter_values)\n filled_prompt = _remove_starting_and_ending_whitespace(filled_prompt)\n\n # Access the 'content' block and render it\n rendered_blocks = {}\n if get_rendered_blocks:\n for block_name in template.blocks.keys():\n block = template.blocks[block_name](\n template.new_context(vars=prompt_parameter_values)\n )\n rendered = \"\".join(block)\n rendered = _remove_chat_tags(\n rendered\n ) # blocks are used for logging and local engines, so should never have chat tags\n rendered_blocks[block_name] = rendered\n\n return filled_prompt, rendered_blocks" }, { "identifier": 
"get_total_cost", "path": "llm/global_variables.py", "snippet": "def get_total_cost():\n global total_cost\n return total_cost" } ]
import argparse
import json
import pandas as pd
import sys
from typing import List
from tqdm import tqdm
from pipelines.dialog_turn import DialogueTurn
from pipelines.chatbot import Chatbot
from pipelines.utils import make_parent_directories
from pipelines.pipeline_arguments import (
    add_pipeline_arguments,
    check_pipeline_arguments,
)
from llm.load_prompt import _fill_template
from llm.global_variables import get_total_cost
12,722
return evidence_texts, evidence_titles # TODO parallelize this function def format_simulated_data(args): simulation_pipeline = args.pipeline args.pipeline = "generate_and_correct" # needed to use claim_splitter from chatbot chatbot = Chatbot(args) dlg_history = [] dlg_claims = set() make_parent_directories(args.output_file) content_list = [] metadata_list = [] example_id = 0 turn_num = 0 dlg_topic = "" with open(args.input_file, "r") as f: for line in tqdm(f.readlines(), desc="Lines"): line_split = line.split("): ") if line_split[0].startswith("User"): cur_dlg_turn = DialogueTurn(user_utterance=line_split[1].strip()) elif line_split[0].startswith("Chatbot"): turn_num += 1 cur_dlg_turn.agent_utterance = line_split[1].strip() claims = chatbot.claim_splitter.split_claim( dialog_history=dlg_history, new_user_utterance=cur_dlg_turn.user_utterance, current_agent_utterance=cur_dlg_turn.agent_utterance, system_parameters={"engine": "gpt-4"}, dialog_topic=dlg_topic, ) # print(claims) dlg_history.append(cur_dlg_turn) ret_output = chatbot._retrieve_evidences(claims, top_p=0.7) for claim_idx, evidences in ret_output.items(): claim_idx = int(claim_idx) if claims[claim_idx][0] in dlg_claims: # print("Skipping duplicate claim") continue claim = claims[claim_idx][0] evidence_texts = [e[2] for e in evidences] evidence_titles = [e[0] for e in evidences] evidence_texts, evidence_titles = highlight_keywords_from_claim( claim, evidence_texts, evidence_titles ) turn_params = { "user_utterance": cur_dlg_turn.user_utterance, "dialog_history": dlg_history, "claim": claim, "evidence_titles": evidence_titles, "evidence_texts": evidence_texts, } content, _ = _fill_template(args.scale_template_file, turn_params) # print(content) # exit() content_list.append(content) metadata_list.append( json.dumps( { "pipeline": simulation_pipeline, "subset": args.subset, "engine": args.engine, # "atlas" "id": str(example_id), "turn_num": str(turn_num), "agent_utterance": cur_dlg_turn.agent_utterance, } ) ) example_id += 1 for c in claims: dlg_claims.add(c[0]) elif line.startswith("====="): turn_num = 0 dlg_history = [] dlg_claims = set() elif line.startswith("Topic:"): dlg_topic = line[7:].strip() # print("dialog topic = ", dlg_topic) else: raise ValueError("ERROR: Unknown line type %s" % line) df = pd.DataFrame({"text": content_list, "metadata": metadata_list}) df.to_csv(args.output_file) if __name__ == "__main__": parser = argparse.ArgumentParser() add_pipeline_arguments(parser) parser.add_argument( "--input_file", type=str, required=True, help="Where to read the partial conversations from, with the last line of each conversation being the model response.", ) parser.add_argument( "--output_file", type=str, required=True, help="Where to write the outputs." ) parser.add_argument( "--scale_template_file", type=str, default="benchmark/prompts/scale_factuality.prompt", help="prompt file to generate input data file for scale ai human evaluation.", ) parser.add_argument( "--subset", type=str, required=True, help="The subset of the benchamrk.", ) args = parser.parse_args()
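`format_simulated_data` above infers the structure of the input file from line prefixes: `User (...): ` and `Chatbot (...): ` turns, `Topic: ` headers, and `=====` separators between conversations. A minimal sketch of a parser for that inferred format (the helper name and the returned dict layout are illustrative, not part of the original code):

def parse_simulated_dialogs(path):
    """Group the simulated-dialog transcript into per-conversation records."""
    dialogs, turns, topic = [], [], ""
    with open(path, "r") as f:
        for line in f:
            if line.startswith("====="):
                dialogs.append({"topic": topic, "turns": turns})
                turns, topic = [], ""
            elif line.startswith("Topic:"):
                topic = line[len("Topic:"):].strip()
            else:
                # "User (persona): utterance" or "Chatbot (pipeline): utterance"
                speaker, utterance = line.split("): ", maxsplit=1)
                turns.append((speaker.strip(), utterance.strip()))
    if turns:
        dialogs.append({"topic": topic, "turns": turns})
    return dialogs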
sys.path.insert(0, "./") stopwords = [ "the", "a", "and", "or", "then", "he", "she", "it", "they", "you", "to", "me", "on", "was", "at", "in", "was", "of", "for", "is", "are", "were", "not", "be", "had", "I", "would", "will", ] stopwords += [s.capitalize() for s in stopwords] def highlight_keywords_from_claim( claim: str, evidence_texts: List[str], evidence_titles: List[str] ): claim_keywords = [ w for w in claim.replace(".", " ") .replace(",", " ") .replace("?", " ") .replace('"', " ") .replace("'", " ") .split(" ") if w not in stopwords and len(w) > 0 ] evidence_texts = [ e.replace("$", "\$").replace("–", "-") for e in evidence_texts ] # escape $ to work with Scale's UI for prefix in [" ", "\n", "(", '"']: for suffix in [" ", ".", ",", ";", "?", ")", "\n", '"']: for i in range(len(evidence_texts)): for k in claim_keywords: evidence_texts[i] = evidence_texts[i].replace( prefix + k + suffix, prefix + '<strong style="background-color:beige;"">' + k + "</strong>" + suffix, ) evidence_titles[i] = evidence_titles.replace( prefix + k + suffix, prefix + '<strong style="background-color:beige;"">' + k + "</strong>" + suffix, ) return evidence_texts, evidence_titles # TODO parallelize this function def format_simulated_data(args): simulation_pipeline = args.pipeline args.pipeline = "generate_and_correct" # needed to use claim_splitter from chatbot chatbot = Chatbot(args) dlg_history = [] dlg_claims = set() make_parent_directories(args.output_file) content_list = [] metadata_list = [] example_id = 0 turn_num = 0 dlg_topic = "" with open(args.input_file, "r") as f: for line in tqdm(f.readlines(), desc="Lines"): line_split = line.split("): ") if line_split[0].startswith("User"): cur_dlg_turn = DialogueTurn(user_utterance=line_split[1].strip()) elif line_split[0].startswith("Chatbot"): turn_num += 1 cur_dlg_turn.agent_utterance = line_split[1].strip() claims = chatbot.claim_splitter.split_claim( dialog_history=dlg_history, new_user_utterance=cur_dlg_turn.user_utterance, current_agent_utterance=cur_dlg_turn.agent_utterance, system_parameters={"engine": "gpt-4"}, dialog_topic=dlg_topic, ) # print(claims) dlg_history.append(cur_dlg_turn) ret_output = chatbot._retrieve_evidences(claims, top_p=0.7) for claim_idx, evidences in ret_output.items(): claim_idx = int(claim_idx) if claims[claim_idx][0] in dlg_claims: # print("Skipping duplicate claim") continue claim = claims[claim_idx][0] evidence_texts = [e[2] for e in evidences] evidence_titles = [e[0] for e in evidences] evidence_texts, evidence_titles = highlight_keywords_from_claim( claim, evidence_texts, evidence_titles ) turn_params = { "user_utterance": cur_dlg_turn.user_utterance, "dialog_history": dlg_history, "claim": claim, "evidence_titles": evidence_titles, "evidence_texts": evidence_texts, } content, _ = _fill_template(args.scale_template_file, turn_params) # print(content) # exit() content_list.append(content) metadata_list.append( json.dumps( { "pipeline": simulation_pipeline, "subset": args.subset, "engine": args.engine, # "atlas" "id": str(example_id), "turn_num": str(turn_num), "agent_utterance": cur_dlg_turn.agent_utterance, } ) ) example_id += 1 for c in claims: dlg_claims.add(c[0]) elif line.startswith("====="): turn_num = 0 dlg_history = [] dlg_claims = set() elif line.startswith("Topic:"): dlg_topic = line[7:].strip() # print("dialog topic = ", dlg_topic) else: raise ValueError("ERROR: Unknown line type %s" % line) df = pd.DataFrame({"text": content_list, "metadata": metadata_list}) df.to_csv(args.output_file) if __name__ == "__main__": 
parser = argparse.ArgumentParser() add_pipeline_arguments(parser) parser.add_argument( "--input_file", type=str, required=True, help="Where to read the partial conversations from, with the last line of each conversation being the model response.", ) parser.add_argument( "--output_file", type=str, required=True, help="Where to write the outputs." ) parser.add_argument( "--scale_template_file", type=str, default="benchmark/prompts/scale_factuality.prompt", help="prompt file to generate input data file for scale ai human evaluation.", ) parser.add_argument( "--subset", type=str, required=True, help="The subset of the benchamrk.", ) args = parser.parse_args()
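`highlight_keywords_from_claim` in the script above wraps every non-stopword claim keyword in a `<strong>` tag so annotators can spot it in the evidence; note that the title branch calls `.replace` on the `evidence_titles` list where the individual string `evidence_titles[i]` is presumably intended. A simplified sketch of the same idea that treats texts and titles uniformly, swapping the explicit prefix/suffix enumeration for a single regex pass with word boundaries (a deliberate simplification, not the original logic):

import re

HIGHLIGHT = '<strong style="background-color:beige;">{}</strong>'

def highlight_keywords(claim, strings, stopwords):
    """Return copies of `strings` with every non-stopword claim keyword wrapped in a <strong> tag."""
    keywords = {w for w in re.findall(r"\w+", claim) if w.lower() not in stopwords}
    if not keywords:
        return list(strings)
    pattern = re.compile(r"\b(" + "|".join(re.escape(k) for k in keywords) + r")\b")
    return [pattern.sub(lambda m: HIGHLIGHT.format(m.group(1)), s) for s in strings]

Applying the same helper to both `evidence_texts` and `evidence_titles` keeps the two lists in sync, which is what the surrounding code expects.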
check_pipeline_arguments(args)
4
2023-10-19 18:17:25+00:00
16k
jhejna/cpl
research/algs/off_policy_algorithm.py
[ { "identifier": "ReplayBuffer", "path": "research/datasets/replay_buffer/buffer.py", "snippet": "class ReplayBuffer(torch.utils.data.IterableDataset):\n \"\"\"\n Generic Replay Buffer Class.\n\n This class adheres to the following conventions to support multiprocessing:\n 1. Variables/functions starting with \"_\", like \"_help\" are to be used only by the replay buffer internaly. They\n are carefully setup for multiprocesing.\n 2. variables/functions named regularly without a leading \"_\" are to be used by the main thread. This includes\n standard functions like \"add\".\n\n There are a few critical setup options.\n 1. Capacity: determines if the buffer is setup upon creation. If it is set to a known value, then we can add data\n online with `add`, or by pulling more data from disk. If is set to None, the dataset is initialized to the full\n size of the offline dataset.\n 2. path: path to offline data that will be loaded\n 3. _data_generator\n\n Some options are mutually exclusive. For example, it is bad to use a non-distributed layout with\n workers and online data. This will generate a bunch of copy on writes.\n\n Data is expected to be stored in a \"next\" format. This means that data is stored like this:\n s_0, dummy, dummy, dummy\n s_1, a_0 , r_0 , d_0\n s_2, a_1 , r_1 , d_1\n s_3, a_2 , r_2 , d_2 ... End of episode!\n s_0, dummy, dummy, dummy\n s_1, a_0 , r_0 , d_0\n s_2, a_1 , r_1 , d_1\n\n This format is expected from the load(path) funciton.\n\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n sample_fn: Union[str, Callable] = \"sample\",\n sample_kwargs: Optional[Dict] = None,\n epoch_ratio: float = 1.0,\n path: Optional[str] = None,\n capacity: Optional[int] = None,\n exclude_keys: Optional[List[str]] = None,\n include_keys: Optional[Dict] = None,\n stacked_obs: bool = False,\n stacked_action: bool = False,\n distributed: bool = False,\n fetch_every: int = 1000,\n cleanup: bool = True,\n ) -> None:\n # Remove stacking if present.\n self.stacked_obs = stacked_obs\n if self.stacked_obs:\n observation_space = remove_stack_dim(observation_space)\n self.stacked_action = stacked_action\n if self.stacked_action:\n action_space = remove_stack_dim(action_space)\n\n self.observation_space = observation_space\n self.action_space = action_space\n\n # Construct the space for the buffer\n self.exclude_keys = [] if exclude_keys is None else exclude_keys # keys to exclude in the storage buffer\n buffer_space = {\n \"obs\": self.observation_space,\n \"action\": self.action_space,\n \"reward\": 0.0,\n \"done\": False,\n \"discount\": 1.0,\n }\n flattened_buffer_space = utils.flatten_dict(buffer_space)\n if include_keys is not None:\n flattened_buffer_space.update(include_keys)\n print(\"FLATTENED BUFFER SPACE\", flattened_buffer_space)\n for k in self.exclude_keys:\n if k in flattened_buffer_space:\n del flattened_buffer_space[k]\n self.buffer_space = utils.nest_dict(flattened_buffer_space)\n\n self.dummy_action = self.action_space.sample()\n self.capacity = capacity\n\n # Setup the sampler\n if isinstance(sample_fn, str):\n sample_fn = vars(sampling)[sample_fn]\n # Use functools partial to override the default args.\n sample_kwargs = {} if sample_kwargs is None else sample_kwargs\n self.sample_fn = functools.partial(sample_fn, **sample_kwargs)\n # Add sampling parameters\n self.epoch_ratio = epoch_ratio\n\n # Path for preloaded data\n self.path = path\n\n # Setup based on distributed value\n self.distributed = distributed\n if self.distributed:\n 
self.cleanup = cleanup\n self.fetch_every = fetch_every\n if self.capacity is not None:\n self.storage_path = tempfile.mkdtemp(prefix=\"replay_buffer_\")\n print(\"[research] Replay Buffer Storage Path\", self.storage_path)\n self.current_ep = utils.nest_dict({k: list() for k in flattened_buffer_space.keys()})\n self.num_episodes = 0\n else:\n self._alloc(self.capacity) # Alloc immediately\n\n def _alloc(self, capacity):\n # Create the data generator\n self._current_data_generator = self._data_generator()\n\n if capacity is None:\n # Allocte the entire dataset\n data = utils.concatenate(*list(self._current_data_generator), dim=0)\n self._storage = storage.FixedStorage(data)\n else:\n # Construct the buffer space. Remember to exclude any exclude keys\n self._storage = storage.CircularStorage(self.buffer_space, capacity)\n # Fill the storage.\n # if self.path is not None:\n for data in self._current_data_generator:\n self._storage.extend(data)\n if self._storage.size >= self._storage.capacity:\n break\n\n print(\"[ReplayBuffer] Allocated {:.2f} GB\".format(self._storage.bytes / 1024**3))\n\n def _data_generator(self):\n \"\"\"\n Can be overridden in order to load the initial data differently.\n By default assumes the data to be the standard format, and returned as a data dictionary.\n or\n None\n\n This function can be overriden by sub-classes in order to produce data batches.\n It should do the following:\n 1. split data across torch data workers\n 2. randomize the order of data\n 3. yield data of the form dicts\n \"\"\"\n if self.path is None:\n return\n\n # By default get all of the file names that are distributed at the correct index\n worker_info = torch.utils.data.get_worker_info()\n num_workers = 1 if worker_info is None else worker_info.num_workers\n worker_id = 0 if worker_info is None else worker_info.id\n\n ep_filenames = [os.path.join(self.path, f) for f in os.listdir(self.path) if f.endswith(\".npz\")]\n random.shuffle(ep_filenames) # Shuffle all the filenames\n\n if num_workers > 1 and len(ep_filenames) == 1:\n print(\n \"[ReplayBuffer] Warning: using multiple workers but single replay file. 
Reduce memory usage by sharding\"\n \" data with `save` instead of `save_flat`.\"\n )\n elif num_workers > 1 and len(ep_filenames) < num_workers:\n print(\"[ReplayBuffer] Warning: using more workers than dataset files.\")\n\n for ep_filename in ep_filenames:\n ep_idx, _ = [int(x) for x in os.path.splitext(ep_filename)[0].split(\"_\")[-2:]]\n # Spread loaded data across workers if we have multiple workers and files.\n if ep_idx % num_workers != worker_id and len(ep_filenames) > 1:\n continue # Only yield the files belonging to this worker.\n data = storage.load_data(ep_filename, exclude_keys=self.exclude_keys)\n yield data\n\n def _fetch_offline(self) -> int:\n \"\"\"\n This simple function fetches a new episode from the offline dataset and adds it to the buffer.\n This is done for each worker.\n \"\"\"\n try:\n data = next(self._current_data_generator)\n except StopIteration:\n self._current_data_generator = self._data_generator()\n data = next(self._current_data_generator)\n self._storage.extend(data)\n # Return the fetched size\n return len(data[\"done\"]) # data must have the done key for storage\n\n def _fetch_online(self) -> int:\n worker_info = torch.utils.data.get_worker_info()\n assert worker_info is not None, \"Must use distributed buffer for online fetching.\"\n\n ep_filenames = sorted([os.path.join(self.storage_path, f) for f in os.listdir(self.storage_path)], reverse=True)\n fetched_size = 0\n for ep_filename in ep_filenames:\n ep_idx, ep_len = [int(x) for x in os.path.splitext(ep_filename)[0].split(\"_\")[-2:]]\n if ep_idx % worker_info.num_workers != worker_info.id:\n continue\n if ep_filename in self._episode_filenames:\n break # We found something we have already loaded\n if fetched_size + ep_len > self._storage.capacity:\n break # do not fetch more than the size of the replay buffer\n\n data = storage.load_data(ep_filename, exclude_keys=self.exclude_keys)\n self._storage.extend(data)\n self._episode_filenames.add(ep_filename)\n if self.cleanup:\n try:\n os.remove(ep_filename)\n except OSError:\n pass\n\n return fetched_size\n\n def _get_dummy_transition(self, obs):\n flattened_buffer_space = utils.flatten_dict(self.buffer_space)\n dummy_transition = {\n k: v.sample() if isinstance(v, gym.Space) else v\n for k, v in flattened_buffer_space.items()\n if not k.startswith(\"obs\") and not k.startswith(\"action\")\n }\n dummy_transition = utils.nest_dict(dummy_transition)\n dummy_transition[\"obs\"] = obs\n dummy_transition[\"action\"] = self.dummy_action\n return dummy_transition\n\n def _reset_current_ep(self):\n ep_idx = self.num_episodes\n ep_len = len(self.current_ep[\"done\"])\n self.num_episodes += 1\n ts = datetime.datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n ep_filename = f\"{ts}_{ep_idx}_{ep_len}.npz\"\n storage.save_data(self.current_ep, os.path.join(self.storage_path, ep_filename))\n\n flattened_buffer_space = utils.flatten_dict(self.buffer_space)\n ep = {k: list() for k in flattened_buffer_space.keys()}\n self.current_ep = utils.nest_dict(ep)\n\n def add(self, **kwargs):\n assert self.capacity is not None, \"Tried to extend to a static size buffer.\"\n # Preprocess here before adding to storage\n if len(kwargs) == 1:\n assert \"obs\" in kwargs\n kwargs = self._get_dummy_transition(kwargs[\"obs\"])\n if self.stacked_obs:\n kwargs[\"obs\"] = utils.get_from_batch(kwargs[\"obs\"], -1)\n else:\n # We have a full transitions\n if self.stacked_obs:\n kwargs[\"obs\"] = utils.get_from_batch(kwargs[\"obs\"], -1)\n if self.stacked_action:\n kwargs[\"action\"] = 
utils.get_from_batch(kwargs[\"action\"], -1)\n\n assert \"done\" in kwargs, \"Need done key for ReplayBuffer\"\n\n # This function is overwritten for distributed / local buffers\n if self.distributed:\n # Add to the current thread, and dump to disk\n utils.append(self.current_ep, kwargs)\n if kwargs[\"done\"]:\n self._reset_current_ep()\n else:\n # Add directly\n self._learning_online = True\n self._storage.add(kwargs)\n\n def extend(self, **kwargs):\n assert \"done\" in kwargs, \"Need done key for ReplayBuffer\"\n assert self.capacity is not None, \"Tried to extend to a static size buffer.\"\n # TODO: There is a chance that if we add a full sequence we will end up with (B, T, stack, ...)\n # which is not what we want. We could compare the shapes of the observation space to fix it\n # but this code might be unnecesary, as this class shouldn't really be used like that anyways.\n if self.distributed:\n # Add to the current thread, and dump to disk\n utils.extend(self.current_ep, kwargs)\n if kwargs[\"done\"][-1]:\n self._reset_current_ep()\n else:\n # Add directly\n self._learning_online = True\n self._storage.extend(kwargs)\n\n def save(self, path):\n os.makedirs(path, exist_ok=True)\n if self.distributed:\n if self.cleanup:\n print(\"[research] Warning, attempting to save a cleaned up replay buffer. There are likely no files\")\n srcs = os.listdir(self.storage_path)\n for src in srcs:\n shutil.move(os.path.join(self.storage_path, src), os.path.join(path, src))\n print(\"Successfully saved\", len(srcs), \"episodes.\")\n else:\n ep_len = self._storage.size\n ep_idx = 0\n ts = datetime.datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n ep_filename = f\"{ts}_{ep_idx}_{ep_len}.npz\"\n save_path = os.path.join(path, ep_filename)\n self._storage.save(save_path)\n\n def sample(self, *args, **kwargs):\n return self.sample_fn(self._storage, *args, **kwargs)\n\n def __iter__(self):\n assert not hasattr(self, \"_iterated\"), \"__iter__ called twice!\"\n self._iterated = True\n worker_info = torch.utils.data.get_worker_info()\n assert (worker_info is not None) == self.distributed, \"ReplayBuffer.distributed not set correctly!\"\n\n # allocate the buffer with the given capacity\n if self.distributed:\n self._alloc(None if self.capacity is None else self.capacity // worker_info.num_workers)\n self._episode_filenames = set()\n\n self._learning_online = False\n\n samples_since_last_offline_fetch = 0\n samples_since_last_online_fetch = 0\n last_offline_fetch_size = 0\n\n batch_size = self.sample_fn.keywords.get(\"batch_size\", 1)\n stack_size = self.sample_fn.keywords.get(\"stack\", 1)\n seq_size = self.sample_fn.keywords.get(\"seq_length\", 1)\n\n while True:\n if self._storage.size < seq_size * stack_size + 1:\n yield {} # If the buffer is too small for sampling, continue.\n else:\n sample = self.sample_fn(self._storage)\n if batch_size == 1:\n sample = utils.squeeze(sample, 0)\n yield sample\n\n # Fetch new data if we have a circular buffer.\n if isinstance(self._storage, storage.CircularStorage):\n if self.distributed: # Always check for online data\n # We fetch from the online buffer\n samples_since_last_online_fetch += 1\n if samples_since_last_online_fetch >= self.fetch_every:\n fetch_size = self._fetch_online()\n self._learning_online = self._learning_online or (fetch_size > 0)\n samples_since_last_online_fetch = 0\n\n if not self._learning_online and self.path is not None:\n # We fetch from the offline buffer\n samples_since_last_offline_fetch += 1\n data_pts_since_last_offline_fetch = (\n 
samples_since_last_offline_fetch * batch_size * seq_size * stack_size\n )\n if data_pts_since_last_offline_fetch >= last_offline_fetch_size * self.epoch_ratio:\n last_offline_fetch_size = self._fetch_offline()\n samples_since_last_offline_fetch = 0\n\n def __del__(self):\n if not self.distributed:\n return\n if self.cleanup:\n return\n else:\n paths = [os.path.join(self.storage_path, f) for f in os.listdir(self.storage_path)]\n for path in paths:\n try:\n os.remove(path)\n except OSError:\n pass\n try:\n os.rmdir(self.storage_path)\n except OSError:\n pass" }, { "identifier": "storage", "path": "research/datasets/replay_buffer/storage.py", "snippet": "def load_data(path: str, exclude_keys: Optional[List[str]]) -> Dict:\ndef save_data(data: Dict, path: str) -> None:\ndef get_bytes(buffer: Union[Dict, np.ndarray]) -> int:\n def capacity(self):\n def size(self):\n def starts(self):\n def ends(self):\n def lengths(self):\n def bytes(self):\n def save(self, path):\n def __getitem__(self, key):\n def __getattr__(self, name):\n def __contains__(self, key):\n def add(self, data):\n def extend(self, data):\n def __init__(self, buffers: Dict) -> None:\n def add(self, data):\n def extend(self, data):\n def __init__(self, initial_capacity: int = 100, dtype=np.int64):\n def _reset(self):\n def append(self, value):\n def pop(self):\n def popleft(self):\n def view(self):\n def __len__(self):\n def first(self):\n def last(self):\n def __str__(self):\n def __init__(self, buffer_space: Union[Dict, gym.spaces.Dict], capacity: Optional[int] = None) -> None:\n def _update_markers(self, new_ends: Iterable = ()):\n def add(self, data):\n def extend(self, data):\nclass Storage(abc.ABC):\nclass FixedStorage(Storage):\nclass NPQueue(object):\nclass CircularStorage(Storage):" }, { "identifier": "EmptyEnv", "path": "research/envs/base.py", "snippet": "class EmptyEnv(gym.Env):\n\n \"\"\"\n An empty holder for defining supervised learning problems\n It works by specifying the ranges and shapes.\n \"\"\"\n\n def __init__(\n self,\n observation_low=None,\n observation_high=None,\n observation_shape=None,\n observation_dtype=np.float32,\n observation_space=None,\n action_low=None,\n action_high=None,\n action_shape=None,\n action_dtype=np.float32,\n action_space=None,\n ):\n if observation_space is not None:\n self.observation_space = observation_space\n else:\n self.observation_space = _get_space(observation_low, observation_high, observation_shape, observation_dtype)\n if action_space is not None:\n self.action_space = action_space\n else:\n self.action_space = _get_space(action_low, action_high, action_shape, action_dtype)\n\n def step(self, action):\n raise NotImplementedError(\"Empty Env does not have step\")\n\n def reset(self, **kwargs):\n raise NotImplementedError(\"Empty Env does not have reset\")" }, { "identifier": "ModuleContainer", "path": "research/networks/base.py", "snippet": "class ModuleContainer(torch.nn.Module):\n CONTAINERS = []\n\n def __init__(self, observation_space: gym.Space, action_space: gym.Space, **kwargs) -> None:\n super().__init__()\n # save the classes and containers\n base_kwargs = {k: v for k, v in kwargs.items() if not k.endswith(\"_class\") and not k.endswith(\"_kwargs\")}\n\n output_space = observation_space\n for container in self.CONTAINERS:\n module_class = kwargs.get(container + \"_class\", torch.nn.Identity)\n module_class = vars(research.networks)[module_class] if isinstance(module_class, str) else module_class\n if module_class is torch.nn.Identity:\n module_kwargs = dict()\n 
else:\n module_kwargs = base_kwargs.copy()\n module_kwargs.update(kwargs.get(container + \"_kwargs\", dict()))\n # Create the module, and attach it to self\n module = module_class(output_space, action_space, **module_kwargs)\n setattr(self, container, module)\n\n # Set a reset function\n setattr(self, \"reset_\" + container, partial(self._reset, container))\n\n if hasattr(getattr(self, container), \"output_space\"):\n # update the output space\n output_space = getattr(self, container).output_space\n\n # Done creating all sub-modules.\n\n @classmethod\n def create_subset(cls, containers):\n assert all([container in cls.CONTAINERS for container in containers])\n name = \"\".join([container.capitalize() for container in containers]) + \"Subset\"\n return type(name, (ModuleContainer,), {\"CONTAINERS\": containers})\n\n def _reset(self, container: str) -> None:\n module = getattr(self, container)\n with torch.no_grad():\n module.apply(reset)\n\n def compile(self, **kwargs):\n for container in self.CONTAINERS:\n attr = getattr(self, container)\n if type(attr).forward == torch.nn.Module.forward:\n assert hasattr(attr, \"compile\"), (\n \"container \" + container + \" is nn.Module without forward() but didn't define `compile`.\"\n )\n attr.compile(**kwargs)\n else:\n setattr(self, container, torch.compile(attr, **kwargs))\n\n def forward(self, x):\n # Use all of the modules in order\n for container in self.CONTAINERS:\n x = getattr(self, container)(x)\n return x" }, { "identifier": "runners", "path": "research/utils/runners.py", "snippet": "class CloudpickleWrapper:\nclass AsyncState(Enum):\nclass AsyncEnv(gym.Env):\nclass MPRunner(object):\n def __init__(self, fn: Callable):\n def __getstate__(self):\n def __setstate__(self, ob):\n def __call__(self):\ndef alloc_shared_buffer(space: Any):\ndef read_shared_buffer(shared_buffer: Any, space: gym.Space):\ndef write_shared_buffer(shared_buffer: Any, space: gym.Space, value: Any):\n def __init__(\n self, env_fn: Callable, observation_space: Optional[gym.Space] = None, action_space: Optional[gym.Space] = None\n ):\n def step_send(self, action):\n def step_recv(self):\n def step(self, action):\n def reset_send(self):\n def reset_recv(self):\n def reset(self):\n def close(self):\ndef _async_env_worker(env_fn, pipe, parent_pipe, obs_buffer, action_buffer):\n def __init__(\n self,\n env_fn,\n fn: Optional[Callable] = None,\n observation_space: Optional[gym.Space] = None,\n action_space: Optional[gym.Space] = None,\n **kwargs,\n ):\n def start(self, fn: Optional[Callable] = None, **kwargs):\n def started(self):\n def __call__(self, block=False):\n def step(self, *args, **kwargs):\n def reset(self, *args, **kwargs):\n def close(self):\n DEFAULT = \"default\"\n WAITING_RESET = \"reset\"\n WAITING_STEP = \"step\"" }, { "identifier": "utils", "path": "research/utils/utils.py", "snippet": "def to_device(batch: Any, device: torch.device) -> Any:\ndef to_tensor(batch: Any) -> Any:\ndef to_np(batch: Any) -> Any:\ndef remove_float64(batch: Any):\ndef unsqueeze(batch: Any, dim: int) -> Any:\ndef squeeze(batch: Any, dim: int) -> Any:\ndef get_from_batch(batch: Any, start: Union[int, np.ndarray, torch.Tensor], end: Optional[int] = None) -> Any:\ndef set_in_batch(batch: Any, value: Any, start: int, end: Optional[int] = None) -> None:\ndef batch_copy(batch: Any) -> Any:\ndef space_copy(space: gym.Space):\ndef contains_tensors(batch: Any) -> bool:\ndef get_device(batch: Any) -> Optional[torch.device]:\ndef concatenate(*args, dim: int = 0):\ndef append(lst, item):\ndef 
extend(lst1, lst2):\n def __init__(self, name: str = \"\"):\n def forward(self, x: Any) -> Any:\ndef np_dataset_alloc(\n space: gym.Space, capacity: int, begin_pad: Tuple[int] = tuple(), end_pad: Tuple[int] = tuple()\n) -> np.ndarray:\ndef np_bytes_per_instance(space: gym.Space) -> int:\ndef _flatten_dict_helper(flat_dict: Dict, value: Any, prefix: str, separator: str = \".\") -> None:\ndef flatten_dict(d: Dict, separator: str = \".\") -> Dict:\ndef nest_dict(d: Dict, separator: str = \".\") -> Dict:\ndef fetch_from_dict(d: Dict, keys: Union[str, List, Tuple], separator=\".\") -> List[Any]:\ndef create_optim_groups(params, kwargs):\nclass PrintNode(torch.nn.Module):" }, { "identifier": "Algorithm", "path": "research/algs/base.py", "snippet": "class Algorithm(ABC):\n _save_keys: Set[str]\n _compiled: bool\n\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n network_class: Type[torch.nn.Module],\n dataset_class: Union[Type[torch.utils.data.IterableDataset], Type[torch.utils.data.Dataset]],\n network_kwargs: Optional[Dict] = None,\n dataset_kwargs: Optional[Dict] = None,\n validation_dataset_class: Optional[\n Union[Type[torch.utils.data.IterableDataset], Type[torch.utils.data.Dataset]]\n ] = None,\n validation_dataset_kwargs: Optional[Dict] = None,\n optim_class: Type[torch.optim.Optimizer] = torch.optim.Adam,\n optim_kwargs: Optional[Dict] = None,\n schedulers_class: Optional[Dict] = None,\n schedulers_kwargs: Optional[Dict[str, Dict]] = None,\n processor_class: Optional[Type[Processor]] = None,\n processor_kwargs: Optional[Dict] = None,\n checkpoint: Optional[str] = None,\n device: Union[str, torch.device] = \"auto\",\n ):\n # Initialize the _save_keys attribute using the superclass.\n # These are used for automatically identifying keys for saving/loading.\n super().__setattr__(\"_save_keys\", set())\n super().__setattr__(\"_module_keys\", set())\n super().__setattr__(\"_compiled\", False)\n\n # Save relevant values\n self.observation_space = observation_space\n self.action_space = action_space\n self.optim = {}\n\n # setup devices\n if device == \"auto\":\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self._device = torch.device(device)\n\n # Setup the data preprocessor first. 
Thus, if we need to reference it in network setup we can.\n # Everything here is saved in self.processor\n self.setup_processor(processor_class, {} if processor_kwargs is None else processor_kwargs)\n\n # Create the network.\n network_kwargs = {} if network_kwargs is None else network_kwargs\n self.setup_network(network_class, network_kwargs)\n\n # Save values for optimizers, which will be lazily initialized later\n self.optim = {}\n self.optim_class = optim_class\n self.optim_kwargs = {\"lr\": 0.0001} if optim_kwargs is None else optim_kwargs\n\n # Save values for schedulers, which will be lazily initialized later\n self.schedulers = {}\n self.schedulers_class = {} if schedulers_class is None else schedulers_class\n self.schedulers_kwargs = {} if schedulers_kwargs is None else schedulers_kwargs\n\n # Save values for datasets, which will be lazily initialized later\n self.dataset_class = dataset_class\n self.dataset_kwargs = {} if dataset_kwargs is None else dataset_kwargs\n self.validation_dataset_class = validation_dataset_class\n self.validation_dataset_kwargs = validation_dataset_kwargs\n\n self._training = False\n\n # Load a check point if we have one -- using non-strict enforcement.\n # NOTE: this only loads the network and will _not_ load the optimizer checkpoint.\n if checkpoint is not None:\n self.load(checkpoint, strict=False)\n\n @property\n def device(self):\n return self._device\n\n @property\n def training(self) -> bool:\n return self._training\n\n def __setattr__(self, name: str, value: Any) -> None:\n # Check to see if the value is a module etc.\n if (hasattr(self, \"_save_keys\") and name in self._save_keys) or (\n hasattr(self, \"_module_keys\") and name in self._module_keys\n ):\n pass\n elif isinstance(value, torch.nn.Parameter):\n self._save_keys.add(name)\n elif isinstance(value, torch.nn.Module):\n self._module_keys.add(name)\n if sum(p.numel() for p in value.parameters()) > 0:\n self._save_keys.add(name) # store if we have a module with more than zero parameters.\n return super().__setattr__(name, value)\n\n @property\n def save_keys(self) -> List[str]:\n return self._save_keys\n\n @property\n def module_keys(self) -> List[str]:\n return self._module_keys\n\n @property\n def compiled(self) -> bool:\n return self._compiled\n\n def to(self, device) -> \"Algorithm\":\n for k in self.save_keys:\n if k == \"processor\" and not self.processor.supports_gpu:\n continue\n else:\n setattr(self, k, getattr(self, k).to(device))\n return self\n\n def compile(self, **kwargs):\n for k in self.save_keys:\n attr = getattr(self, k)\n if isinstance(attr, torch.nn.Module):\n if type(attr).forward == torch.nn.Module.forward:\n # In this case, the forward method hasn't been overriden.\n # Thus we assume there is a compile argument.\n assert hasattr(attr, \"compile\"), (\n \"save key \" + k + \" is nn.Module without forward() but didn't define `compile`.\"\n )\n attr.compile(**kwargs)\n else:\n setattr(self, k, torch.compile(attr, **kwargs))\n # indicate that we have compiled the models.\n self._compiled = True\n\n def train(self) -> None:\n for k in self._module_keys:\n getattr(self, k).train()\n self._training = True\n\n def eval(self) -> None:\n for k in self._module_keys:\n getattr(self, k).eval()\n self._training = False\n\n @property\n def num_params(self):\n _num_params = 0\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n _num_params += sum(p.numel() for p in attr.parameters() if p.requires_grad)\n else:\n assert isinstance(attr, 
torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n if attr.requires_grad:\n _num_params += attr.numel()\n return _num_params\n\n @property\n def nbytes(self):\n # Returns the size of all the parameters in bytes\n _bytes = 0\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n for p in attr.parameters():\n _bytes += p.nelement() * p.element_size()\n if hasattr(attr, \"buffers\"):\n for b in attr.buffers():\n _bytes += b.nelement() * b.element_size()\n return _bytes\n\n def setup_processor(self, processor_class: Optional[Type[Processor]], processor_kwargs: Dict) -> None:\n if processor_class is None:\n processor = Identity(self.observation_space, self.action_space)\n else:\n processor = processor_class(self.observation_space, self.action_space, **processor_kwargs)\n\n if processor.supports_gpu: # move it to device if it supports GPU computation.\n self.processor = processor.to(self.device)\n else:\n self.processor = processor\n\n def setup_network(self, network_class: Type[torch.nn.Module], network_kwargs: Dict) -> None:\n self.network = network_class(\n self.processor.observation_space, self.processor.action_space, **network_kwargs\n ).to(self.device)\n\n def setup_optimizers(self) -> None:\n \"\"\"\n This is only called by the Trainer, and not called when we load the model.\n This is done so that inference jobs don't load the optimizer state.\n \"\"\"\n # Setup Optimizers\n assert len(self.optim) == 0, \"setup_optimizers called twice!\"\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n parameters = attr.parameters()\n else:\n assert isinstance(attr, torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n parameters = [attr]\n # Constrcut the optimizer\n self.optim[k] = self.optim_class(parameters, **self.optim_kwargs)\n\n def setup_schedulers(self):\n assert len(self.schedulers) == 0, \"setup_schedulers called twice!\"\n for k in self.schedulers_class.keys():\n if self.schedulers_class[k] is not None:\n assert k in self.optim, \"Did not find schedule key in optimizers dict.\"\n self.schedulers[k] = self.schedulers_class[k](self.optim[k], **self.schedulers_kwargs.get(k, dict()))\n\n def setup_datasets(self, env: gym.Env, total_steps: int):\n \"\"\"\n Called after everything else has been setup, right before training starts\n This is _only_ called by the trainer and is not called by default.\n This function is responsible for creating the following attributes:\n self.dataset (required)\n self.validation_dataset\n \"\"\"\n assert not hasattr(self, \"dataset\"), \"setup_datasets called twice!\"\n assert not hasattr(self, \"validation_dataset\"), \"setup_datasets called twice!\"\n # Setup the train dataset\n self.dataset = self.dataset_class(self.observation_space, self.action_space, **self.dataset_kwargs)\n # Setup the validation dataset\n if self.validation_dataset_class is not None:\n self.validation_dataset = self.validation_dataset_class(\n self.observation_space, self.action_space, **self.validation_dataset_kwargs\n )\n elif self.validation_dataset_kwargs is not None:\n validation_dataset_kwargs = copy.deepcopy(self.dataset_kwargs)\n validation_dataset_kwargs.update(self.validation_dataset_kwargs)\n self.validation_dataset = self.dataset_class(\n self.observation_space, self.action_space, **validation_dataset_kwargs\n )\n else:\n self.validation_dataset = None\n\n def save(self, path: str, extension: str, metadata: Optional[Dict] = None) -> None:\n \"\"\"\n Saves a checkpoint of the model 
and the optimizers\n \"\"\"\n save_dict = {}\n if len(self.optim) > 0:\n save_dict[\"optim\"] = {k: v.state_dict() for k, v in self.optim.items()}\n if len(self.schedulers) > 0:\n save_dict[\"schedulers\"] = {k: v.state_dict() for k, v in self.schedulers.items()}\n for k in self._save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"state_dict\"):\n save_dict[k] = attr.state_dict()\n else:\n assert isinstance(attr, torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n save_dict[k] = attr\n\n # Add the metadata\n save_dict[\"metadata\"] = {} if metadata is None else metadata\n save_path = os.path.join(path, extension)\n if not save_path.endswith(\".pt\"):\n save_path += \".pt\"\n torch.save(save_dict, save_path)\n\n def load(self, checkpoint: str, strict: bool = True) -> Dict:\n \"\"\"\n Loads the model and its associated checkpoints.\n If we haven't created the optimizers and schedulers, do not load those.\n \"\"\"\n print(\"[research] loading checkpoint:\", checkpoint)\n checkpoint = torch.load(checkpoint, map_location=self.device)\n remaining_checkpoint_keys = set(checkpoint.keys())\n\n # First load everything except for the optim\n for k in self.save_keys: # Loop through keys in the Algorithm.\n if k not in checkpoint:\n if strict:\n raise ValueError(\"Checkpoint did not have key \" + str(k))\n else:\n print(\"[research] Warning: Checkpoint did not have key\", k)\n continue\n\n if isinstance(getattr(self, k), torch.nn.Parameter):\n # directly set the data, this is for nn.Parameters\n getattr(self, k).data = checkpoint[k].data\n else:\n # Otherwise, load via state dict\n getattr(self, k).load_state_dict(checkpoint[k], strict=strict)\n remaining_checkpoint_keys.remove(k)\n\n # Now load the optimizer and its associated keys\n for k in self.optim.keys():\n if strict and k not in checkpoint[\"optim\"]:\n raise ValueError(\"Strict mode was enabled, but couldn't find optimizer key\")\n elif k not in checkpoint[\"optim\"]:\n print(\"[research] Warning: Checkpoint did not have optimizer key\", k)\n continue\n self.optim[k].load_state_dict(checkpoint[\"optim\"][k])\n if \"optim\" in checkpoint:\n remaining_checkpoint_keys.remove(\"optim\")\n\n # Now load the schedulers\n for k in self.schedulers.keys():\n if strict and k not in checkpoint[\"schedulers\"]:\n raise ValueError(\"Strict mode was enabled, but couldn't find scheduler key\")\n elif k not in checkpoint[\"schedulers\"]:\n print(\"[research] Warning: Checkpoint did not have scheduler key\", k)\n continue\n self.schedulers[k].load_state_dict(checkpoint[\"schedulers\"][k])\n if \"schedulers\" in checkpoint:\n remaining_checkpoint_keys.remove(\"schedulers\")\n\n remaining_checkpoint_keys.remove(\"metadata\") # Do not count metadata key, which is always addded.\n if strict and len(remaining_checkpoint_keys) > 0:\n raise ValueError(\"Algorithm did not have keys \", +str(remaining_checkpoint_keys))\n elif len(remaining_checkpoint_keys) > 0:\n print(\"[research] Warning: Checkpoint keys\", remaining_checkpoint_keys, \"were not loaded.\")\n\n return checkpoint[\"metadata\"]\n\n def format_batch(self, batch: Any) -> Any:\n # Convert items to tensor if they are not.\n # Checking first makes sure we do not distrub memory pinning\n if not utils.contains_tensors(batch):\n batch = utils.to_tensor(batch)\n if self.processor.supports_gpu:\n # Move to CUDA first.\n batch = utils.to_device(batch, self.device)\n batch = self.processor(batch)\n else:\n batch = self.processor(batch)\n batch = utils.to_device(batch, self.device)\n return batch\n\n 
@abstractmethod\n def train_step(self, batch: Any, step: int, total_steps: int) -> Dict:\n \"\"\"\n Train the model. Should return a dict of loggable values\n \"\"\"\n return {}\n\n def validation_step(self, batch: Any) -> Dict:\n \"\"\"\n perform a validation step. Should return a dict of loggable values.\n \"\"\"\n raise NotImplementedError\n\n def env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict:\n \"\"\"\n Perform any extra training operations. This is done before the train step is called.\n A common use case for this would be stepping the environment etc.\n \"\"\"\n return {}\n\n def validation_extras(self, path: str, step: int) -> Dict:\n \"\"\"\n Perform any extra validation operations.\n A common usecase for this is saving visualizations etc.\n \"\"\"\n return {}\n\n def _predict(self, batch: Any, **kwargs) -> Any:\n \"\"\"\n Internal prediction function, can be overridden\n By default, we call torch.no_grad(). If this behavior isn't desired,\n override the _predict funciton in your algorithm.\n \"\"\"\n with torch.no_grad():\n if len(kwargs) > 0:\n raise ValueError(\"Default predict method does not accept key word args, but they were provided.\")\n pred = self.network(batch)\n return pred\n\n def predict(self, batch: Any, is_batched: bool = False, **kwargs) -> Any:\n is_np = not utils.contains_tensors(batch)\n if not is_batched:\n # Unsqeeuze everything\n batch = utils.unsqueeze(batch, 0)\n batch = self.format_batch(batch)\n pred = self._predict(batch, **kwargs)\n if not is_batched:\n pred = utils.get_from_batch(pred, 0)\n if is_np:\n pred = utils.to_np(pred)\n return pred" } ]
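The `ReplayBuffer` docstring above describes the "next" storage format, in which row `t` stores `s_t` together with the action, reward, and done flag that produced it, and the first row of each episode carries dummy values. A small sketch of how one episode lays out in that format and how a standard `(s, a, r, s', d)` transition is read back, using plain numpy arrays instead of the project's `Storage` classes:

import numpy as np

# Episode: s_0 --a_0--> s_1 --a_1--> s_2 (episode terminates)
obs    = np.array([0.0, 1.0, 2.0])        # s_0,   s_1, s_2
action = np.array([np.nan, 10.0, 11.0])   # dummy, a_0, a_1
reward = np.array([np.nan, 0.5, 1.0])     # dummy, r_0, r_1
done   = np.array([False, False, True])   # dummy, d_0, d_1

def transition(i):
    """Recover the i-th transition from "next"-format arrays."""
    return obs[i], action[i + 1], reward[i + 1], obs[i + 1], done[i + 1]

print(transition(0))  # (0.0, 10.0, 0.5, 1.0, False)
print(transition(1))  # (1.0, 11.0, 1.0, 2.0, True)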
import datetime
import functools
import os
import sys
import tempfile
import gym
import numpy as np
import torch
from abc import abstractmethod
from typing import Any, Dict, Optional, Union
from research.datasets import ReplayBuffer
from research.datasets.replay_buffer import storage
from research.envs.base import EmptyEnv
from research.networks.base import ModuleContainer
from research.utils import runners, utils
from .base import Algorithm
from research.utils.config import Config
11,234
self._episode_length = 0 self._num_ep = 0 self._env_steps = 0 # Note that currently the very first (s, a) pair is thrown away because # we don't add to the dataset here. # This was done for better compatibility for offline to online learning. self.dataset.add(obs=self._current_obs) # add the first observation. self.env_step = self._env_step else: raise ValueError("Invalid env passed") def _empty_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: return dict() def _env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Return if env is Empty or we we aren't at every env_freq steps if step <= self.offline_steps: # Purposefully set to nan so we write CSV log. return dict(steps=self._env_steps, reward=-np.inf, length=np.inf, num_ep=self._num_ep) if step < self.random_steps: action = env.action_space.sample() else: self.eval() action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): action = np.clip(action, env.action_space.low, env.action_space.high) next_obs, reward, done, info = env.step(action) self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward if "discount" in info: discount = info["discount"] elif hasattr(env, "_max_episode_steps") and self._episode_length == env._max_episode_steps: discount = 1.0 else: discount = 1 - float(done) # Store the consequences. self.dataset.add(obs=next_obs, action=action, reward=reward, done=done, discount=discount) if done: self._num_ep += 1 # Compute metrics metrics = dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) # Reset the environment self._current_obs = env.reset() self.dataset.add(obs=self._current_obs) # Add the first timestep self._episode_length = 0 self._episode_reward = 0 return metrics else: self._current_obs = next_obs return dict(steps=self._env_steps) def _async_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Recieve Data from the last step and add to buffer. Should only call recv! if self._resetting: self._current_obs = env.reset_recv() self._num_ep += 1 self._episode_length = 0 self._episode_reward = 0 self.dataset.add(obs=self._current_obs) self._resetting = False done = False else: self._current_obs, reward, done, info = env.step_recv() self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward self.dataset.add( obs=self._current_obs, action=self._current_action, reward=reward, done=done, discount=info["discount"] ) # Send data for the next step and return metrics. Should only call send! if done: # If the episode terminated, then we need to reset and send the reset message self._resetting = True env.reset_send() return dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) else: # Otherwise, compute the action we should take and send it. self._resetting = False if step < self.random_steps: self._current_action = env.action_space.sample() else: self.eval() self._current_action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): self._current_action = np.clip(self._current_action, env.action_space.low, env.action_space.high) env.step_send(self._current_action) return dict(steps=self._env_steps) def _runner_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # All we do is check the pipe to see if there is data! 
metrics = env() if len(metrics) > 0: # If the metrics are non-empty, then it means that we have completed an episode. # As such, decrement the counter self._eps_since_last_checkpoint += 1 if self._eps_since_last_checkpoint == self.async_runner_ep_lag: self.save(self._checkpoint_dir, str(step), dict(step=step)) self._eps_since_last_checkpoint = 0 return metrics @abstractmethod def _get_train_action(self, obs: Any, step: int, total_steps: int) -> np.ndarray: raise NotImplementedError @functools.cached_property def action_range(self): action_range = (self.processor.action_space.low, self.processor.action_space.high)
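`_env_step` above writes a `discount` into the buffer that distinguishes time-limit truncation from true termination: when the episode only ended because the wrapper's `_max_episode_steps` was hit, the discount stays 1.0 so value bootstrapping continues, otherwise it is `1 - done`. A condensed sketch of that rule as a standalone helper (the function name is illustrative; the branch order mirrors the snippet):

def compute_discount(info, done, episode_length, max_episode_steps=None):
    """Discount written alongside each transition when it is added to the replay buffer."""
    if "discount" in info:
        # The environment reports its own discount; trust it.
        return info["discount"]
    if max_episode_steps is not None and episode_length == max_episode_steps:
        # Time-limit truncation: not a real terminal state, keep bootstrapping.
        return 1.0
    # Genuine termination zeroes out the bootstrap target.
    return 1.0 - float(done)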
class OffPolicyAlgorithm(Algorithm): def __init__( self, *args, offline_steps: int = 0, # Run fully offline by setting to -1 random_steps: int = 1000, async_runner_ep_lag: int = 1, **kwargs, ): super().__init__(*args, **kwargs) self.offline_steps = offline_steps self.random_steps = random_steps self.async_runner_ep_lag = async_runner_ep_lag def setup_datasets(self, env: gym.Env, total_steps: int): super().setup_datasets(env, total_steps) # Assign the correct update function based on what is passed in. if env is None or isinstance(env, EmptyEnv) or self.offline_steps < 0: self.env_step = self._empty_step elif isinstance(env, runners.AsyncEnv): self._episode_reward = 0 self._episode_length = 0 self._num_ep = 0 self._env_steps = 0 self._resetting = True env.reset_send() # Ask the env to start resetting. self.env_step = self._async_env_step elif isinstance(env, runners.MPRunner): assert isinstance(self.dataset, ReplayBuffer), "must use replaybuffer for MP RUnner." assert self.dataset.distributed, "ReplayBuffer must be distributed for use with Fully MPRunner." # Launch the runner subprocess. self._eps_since_last_checkpoint = 0 self._checkpoint_dir = tempfile.mkdtemp(prefix="checkpoints_") assert self.offline_steps <= 0, "MPRunner does not currently support offline to online." env.start( fn=_off_policy_collector_subprocess, checkpoint_path=self._checkpoint_dir, storage_path=self.dataset.storage_path, random_steps=self.random_steps, exclude_keys=self.dataset.exclude_keys, total_steps=total_steps, ) self.env_step = self._runner_env_step elif isinstance(env, gym.Env): # Setup Env Metrics self._current_obs = env.reset() self._episode_reward = 0 self._episode_length = 0 self._num_ep = 0 self._env_steps = 0 # Note that currently the very first (s, a) pair is thrown away because # we don't add to the dataset here. # This was done for better compatibility for offline to online learning. self.dataset.add(obs=self._current_obs) # add the first observation. self.env_step = self._env_step else: raise ValueError("Invalid env passed") def _empty_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: return dict() def _env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Return if env is Empty or we we aren't at every env_freq steps if step <= self.offline_steps: # Purposefully set to nan so we write CSV log. return dict(steps=self._env_steps, reward=-np.inf, length=np.inf, num_ep=self._num_ep) if step < self.random_steps: action = env.action_space.sample() else: self.eval() action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): action = np.clip(action, env.action_space.low, env.action_space.high) next_obs, reward, done, info = env.step(action) self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward if "discount" in info: discount = info["discount"] elif hasattr(env, "_max_episode_steps") and self._episode_length == env._max_episode_steps: discount = 1.0 else: discount = 1 - float(done) # Store the consequences. 
self.dataset.add(obs=next_obs, action=action, reward=reward, done=done, discount=discount) if done: self._num_ep += 1 # Compute metrics metrics = dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) # Reset the environment self._current_obs = env.reset() self.dataset.add(obs=self._current_obs) # Add the first timestep self._episode_length = 0 self._episode_reward = 0 return metrics else: self._current_obs = next_obs return dict(steps=self._env_steps) def _async_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Recieve Data from the last step and add to buffer. Should only call recv! if self._resetting: self._current_obs = env.reset_recv() self._num_ep += 1 self._episode_length = 0 self._episode_reward = 0 self.dataset.add(obs=self._current_obs) self._resetting = False done = False else: self._current_obs, reward, done, info = env.step_recv() self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward self.dataset.add( obs=self._current_obs, action=self._current_action, reward=reward, done=done, discount=info["discount"] ) # Send data for the next step and return metrics. Should only call send! if done: # If the episode terminated, then we need to reset and send the reset message self._resetting = True env.reset_send() return dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) else: # Otherwise, compute the action we should take and send it. self._resetting = False if step < self.random_steps: self._current_action = env.action_space.sample() else: self.eval() self._current_action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): self._current_action = np.clip(self._current_action, env.action_space.low, env.action_space.high) env.step_send(self._current_action) return dict(steps=self._env_steps) def _runner_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # All we do is check the pipe to see if there is data! metrics = env() if len(metrics) > 0: # If the metrics are non-empty, then it means that we have completed an episode. # As such, decrement the counter self._eps_since_last_checkpoint += 1 if self._eps_since_last_checkpoint == self.async_runner_ep_lag: self.save(self._checkpoint_dir, str(step), dict(step=step)) self._eps_since_last_checkpoint = 0 return metrics @abstractmethod def _get_train_action(self, obs: Any, step: int, total_steps: int) -> np.ndarray: raise NotImplementedError @functools.cached_property def action_range(self): action_range = (self.processor.action_space.low, self.processor.action_space.high)
return utils.to_device(utils.to_tensor(action_range), self.device)
5
2023-10-19 17:25:45+00:00
16k
nbasyl/LLM-FP4
configs/FPQ_config_llama.py
[ { "identifier": "FPPTQSLBatchingQuantLinear_fpq", "path": "quant_layers/fp_linear.py", "snippet": "class FPPTQSLBatchingQuantLinear_fpq(FPPTQSLQuantLinear):\n def __init__(self, \n in_features: int,\n out_features: int,\n bias: bool = True,\n mode = \"raw\",\n w_bit = 8,\n a_bit = 8,\n w_exponent_bit = 4, a_exponent_bit = 4,\n bias_bit = None,\n bias_correction = False,\n metric=\"L2_norm\", search_round=1, eq_alpha=0, eq_beta=1, eq_n=100, parallel_eq_n=10, n_H=1, n_V=1, n_a=1):\n super().__init__(in_features, out_features, bias=bias, mode=mode, w_bit=w_bit, a_bit=a_bit, w_exponent_bit= w_exponent_bit, a_exponent_bit=a_exponent_bit, bias_bit=bias_bit, bias_correction=bias_correction, metric=metric, search_round=search_round, eq_alpha=eq_alpha, eq_beta=eq_beta, eq_n=eq_n, parallel_eq_n=parallel_eq_n, n_H=n_H, n_V=n_V, n_a=n_a)\n self.calib_size = None\n self.calib_batch_size = None\n self.calib_need_batching = False\n self.w_maxval = None\n self.w_intervals = None\n self.a_maxval = None\n self.register_buffer('a_bias',None)\n self.a_biases = None ## fix channel-wise biases\n self.a_intervals = None ## now search for tensor scale not the channel-wise biases\n self.register_buffer('a_interval_zero_point',None)\n self.a_intervals_zero_point = None\n self.n_ls = 1\n\n def _initialize_calib_parameters(self):\n \"\"\" \n set parameters for feeding calibration data\n \"\"\"\n self.calib_size = int(self.raw_input.shape[0])\n self.calib_batch_size = int(self.raw_input.shape[0])\n i = 0\n while True:\n numel = (2*(self.raw_input.numel()+self.raw_out.numel())/self.calib_size*self.calib_batch_size) # number of parameters on GPU\n self.parallel_eq_n = int((3*1024*1024*1024/4)//numel)\n if self.parallel_eq_n <= 1:\n self.calib_need_batching = True\n self.calib_batch_size //= 2\n else:\n break\n \n def _initialize_intervals(self):\n # weight intervals \n ## channel wise\n ## specific for QKV\n if self.n_V != 1:\n # print(\"tackling QKV linear\")\n self.n_ls = 3 # number of tensor scale \n print(\"channel-wise weight\")\n self.n_V = self.out_features\n self.crb_rows = self.out_features // self.n_V\n w_maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.w_maxval = w_maxval\n self.w_interval=(2**self.w_exponent_bit - torch.log2(w_maxval) + math.log2(2 - 2 ** (-self.w_mantissa_bit)) - 1)\n self.w_intervals = []\n if self.w_bit == 8:\n for i in range(self.w_bit-3):\n M = i + 2\n E = self.w_bit - 1 - M\n self.w_intervals.append(2**E - torch.log2(self.w_maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n \n else:\n for i in range(self.w_bit-1):\n M = i\n E = self.w_bit - 1 - M\n self.w_intervals.append(2**E - torch.log2(self.w_maxval) + math.log2(2 - 2 ** (-M)) - 1)\n \n # activation intervals\n tmp_a_maxvals = []\n for b_st in range(0,self.calib_size,self.calib_batch_size):\n b_ed = min(self.calib_size, b_st+self.calib_batch_size)\n x_ = self.raw_input[b_st:b_ed].to(self.weight.device)\n self.n_a = self.in_features\n self.crb_acts = self.in_features // self.n_a\n x_maxval = x_.view(*x_.shape[:-1],self.n_a,self.crb_acts).abs().amax(list(range(len(x_.shape)-1))+[-1],keepdim=False).unsqueeze(-1)\n tmp_a_maxvals.append(x_maxval)\n\n \n tmp_a_maxvals = torch.cat(tmp_a_maxvals, dim=1)\n self.a_maxval = tmp_a_maxvals.amax(dim=1, keepdim=True)\n self.a_bias = 2**self.a_exponent_bit - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-self.a_mantissa_bit)) - 1\n\n\n self.a_interval = (self.a_bias.min())\n self.a_interval_zero_point = torch.round(self.a_interval)\n\n 
self.a_biases = []\n self.a_intervals = []\n self.a_intervals_zero_point = []\n if self.a_bit == 8:\n for i in range(self.a_bit-3):\n M = i + 2\n E = self.a_bit - 1 - M\n cur_a_bias = (2**E - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-M)) - 1)\n self.a_biases.append(cur_a_bias)\n cur_a_interval = (cur_a_bias.min())\n self.a_intervals.append(cur_a_interval.reshape(1,1))\n self.a_intervals_zero_point.append(torch.round(cur_a_bias.min()))\n \n else:\n for i in range(self.a_bit-1):\n M = i\n E = self.a_bit - 1 - M\n cur_a_bias = (2**E - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-M)) - 1)\n self.a_biases.append(cur_a_bias)\n cur_a_interval = (cur_a_bias.min())\n self.a_intervals.append(cur_a_interval.reshape(1,1))\n self.a_intervals_zero_point.append(torch.round(cur_a_bias.min()))\n\n def _initialize_intervals_eval(self):\n self._initialize_calib_parameters()\n # weight intervals \n ## channel wise\n ## specific for QKV\n if self.n_V != 1:\n # print(\"tackling QKV linear\")\n self.n_ls = 3 # number of tensor scale \n print(\"channel-wise weight\")\n self.n_V = self.out_features\n self.crb_rows = self.out_features // self.n_V\n w_maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.w_maxval = w_maxval\n self.w_interval=(2**self.w_exponent_bit - torch.log2(w_maxval) + math.log2(2 - 2 ** (-self.w_mantissa_bit)) - 1) \n # activation intervals\n tmp_a_maxvals = []\n for b_st in range(0,self.calib_size,self.calib_batch_size):\n b_ed = min(self.calib_size, b_st+self.calib_batch_size)\n x_ = self.raw_input[b_st:b_ed].to(self.weight.device)\n self.n_a = self.in_features\n self.crb_acts = self.in_features // self.n_a\n x_maxval = x_.view(*x_.shape[:-1],self.n_a,self.crb_acts).abs().amax(list(range(len(x_.shape)-1))+[-1],keepdim=False).unsqueeze(-1)\n tmp_a_maxvals.append(x_maxval)\n\n tmp_a_maxvals = torch.cat(tmp_a_maxvals, dim=1)\n self.a_maxval = tmp_a_maxvals.amax(dim=1, keepdim=True)\n self.a_bias = 2**self.a_exponent_bit - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-self.a_mantissa_bit)) - 1\n\n self.a_interval = (self.a_bias.min()).view(1,1)\n self.a_interval_zero_point = torch.round(self.a_interval).view(1,1)\n self.calibrated = True\n\n\n def get_maxval_from_bias(self, rescale_bias, act_or_weight):\n \n \n if act_or_weight == 0:\n \n return (2 - 2 ** (-self.a_mantissa_bit)) * 2 ** (\n 2**self.a_exponent_bit - 1 - rescale_bias\n )\n elif act_or_weight == 1:\n \n return (2 - 2 ** (-self.w_mantissa_bit)) * 2 ** (\n 2**self.w_exponent_bit - 1 - rescale_bias\n )\n\n def get_log_scale(self, x ,act_or_weight):\n \n if act_or_weight == 0:\n \n a_bias = self.a_bias\n a_bias = torch.clamp(torch.round(a_bias), torch.round(self.a_interval), torch.round(self.a_interval) + 2**(self.a_exponent_bit) - 1 ) - self.a_interval_zero_point + self.a_interval\n a_bias = a_bias.float()\n a_maxval = self.get_maxval_from_bias(rescale_bias = a_bias, act_or_weight=0)\n a_minval = -a_maxval\n a = torch.min(torch.max(x, a_minval), a_maxval)\n \n a_log_scales = torch.clamp((torch.floor(torch.log2(torch.abs(a)) + a_bias)).detach(), 1.0)\n return a, 2.0 ** (a_log_scales - self.a_mantissa_bit - a_bias)\n \n elif act_or_weight == 1:\n \n w_bias = self.w_interval\n w_bias = w_bias.float()\n w_maxval = self.get_maxval_from_bias(w_bias, 1)\n w_minval = -w_maxval\n w = torch.min(torch.max(x, w_minval), w_maxval)\n w_log_scales = torch.clamp((torch.floor(torch.log2(torch.abs(w)) + w_bias)).detach(), 1.0)\n return w, 2.0 ** (w_log_scales - self.w_mantissa_bit - 
w_bias)\n\n def get_w_scale(self, input, bits, mantissa_bit, bias):\n \n M = mantissa_bit\n E = bits - 1 - M\n bias = bias.float()\n maxval = (2 - 2 ** (-M)) * 2 ** (\n 2**E - 1 - bias\n )\n\n minval = -maxval\n input = torch.min(torch.max(input, minval), maxval)\n input_log_scales = torch.clamp((torch.floor(torch.log2(torch.abs(input)) + bias)).detach(), 1.0)\n return input, 2.0 ** (input_log_scales - M - bias)\n \n def get_scale(self, input, bits, mantissa_bit, bias, tensor_scale, tensor_scale_zero_point):\n \n M = mantissa_bit\n E = bits - 1 - M\n \n rescale_bias = torch.clamp(torch.round(bias), torch.round(tensor_scale), torch.round(tensor_scale) + 2**E - 1) - tensor_scale_zero_point + tensor_scale\n rescale_bias = rescale_bias.float()\n\n maxval = (2 - 2 ** (-M)) * 2 ** (\n 2**E - 1 - rescale_bias\n )\n\n minval = -maxval\n input = torch.min(torch.max(input, minval), maxval)\n \n input_log_scales = torch.clamp((torch.floor(torch.log2(torch.abs(input)) + rescale_bias)).detach(), 1.0)\n\n return input, 2.0 ** (input_log_scales - M - rescale_bias) \n\n def _get_similarity(self, tensor_raw, tensor_sim, metric=None, raw_grad=None):\n \"\"\"\n tensor_raw: *, features\n tensor_sim: *, features\n similarity: *\n It's your job to calculate mean on * dims!\n \"\"\"\n if metric == \"cosine\":\n similarity = F.cosine_similarity(tensor_raw, tensor_sim, dim=-1)\n else:\n if metric == \"L1_norm\":\n similarity = -torch.abs(tensor_raw - tensor_sim)\n elif metric == \"L2_norm\":\n similarity = -(tensor_raw - tensor_sim) ** 2\n elif metric == \"linear_weighted_L2_norm\":\n similarity = -tensor_raw.abs() * (tensor_raw - tensor_sim) ** 2\n elif metric == \"square_weighted_L2_norm\":\n similarity = -(tensor_raw * (tensor_raw - tensor_sim)) ** 2\n else:\n raise NotImplementedError(f\"metric {metric} not implemented!\")\n similarity = torch.mean(similarity, dim=-1)\n return similarity\n\n def _get_pearson_w(self, tensor_raw, tensor_sim):\n \"\"\"\n Quick implementation of similarity-aware linear quantization\n tensor_sim: b,*,parallel_eq_n,n_V,crb_rows\n tensor_raw: b,*,1,n_V,crb_rows\n \"\"\"\n b, parallel_eq_n, n_V = tensor_sim.shape[0],tensor_sim.shape[-3],tensor_sim.shape[-2]\n tensor_sim = tensor_sim.transpose(-1,-3).contiguous_().view(b,-1,n_V,parallel_eq_n)\n tensor_raw = tensor_raw.transpose(-1,-3).view(b,-1,n_V,1)\n tensor_sim_mean = tensor_sim.mean(dim=[0,1],keepdim=True)\n tensor_raw_mean = tensor_raw.mean(dim=[0,1],keepdim=True)\n similarity = torch.cosine_similarity(tensor_raw-tensor_raw_mean, tensor_sim-tensor_sim_mean, dim=1) # shape: b,n_V,parallel_eq_n\n similarity = similarity.permute(0,2,1).contiguous_()\n return similarity\n \n def _get_pearson_a(self, tensor_raw, tensor_sim):\n \"\"\"\n Quick implementation of similarity-aware linear quantization\n tensor_sim: b,*,parallel_eq_n,oc\n tensor_raw: b,*,1,oc\n \"\"\"\n b, parallel_eq_n = tensor_sim.shape[0],tensor_sim.shape[-2]\n tensor_sim = tensor_sim.transpose(-1,-2).contiguous_().view(b,-1,parallel_eq_n)\n tensor_raw = tensor_raw.transpose(-1,-2).view(b,-1,1)\n tensor_sim_mean = tensor_sim.mean(dim=[0,1],keepdim=True)\n tensor_raw_mean = tensor_raw.mean(dim=[0,1],keepdim=True)\n similarity = torch.cosine_similarity(tensor_raw-tensor_raw_mean, tensor_sim-tensor_sim_mean, dim=1) # shape: b,parallel_eq_n\n return similarity\n\n def _search_best_w_interval(self, weight_interval_candidates):\n \n # print(f\"weight_interval_candidates shape {weight_interval_candidates.shape}\")\n for man in range(weight_interval_candidates.shape[0]):\n # 
print(f\"CUR w E{self.w_bit - 1 - man}M{man}\")\n tmp_w_interval = self.w_intervals[man].unsqueeze(0) # shape: 1,n_V,1,n_H,1\n for h in range(self.n_H):\n batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n # print(f\"before search E{self.w_bit-1-man}M{man} self.w_intervals[man] {self.w_intervals[man][0][0]}\")\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out_expanded = self.raw_out[b_st:b_ed].to(self.weight.device).unsqueeze(-2) # shape: b,*,1,oc\n raw_out_expanded = torch.cat(torch.chunk(raw_out_expanded.unsqueeze(-2), chunks=self.n_V, dim=-1), dim=-2) # shape: b,*,1,n_V,crb_rows\n raw_grad = self.raw_grad\n similarities = []\n \n for p_st in range(0,self.eq_n,self.parallel_eq_n):\n p_ed = min(self.eq_n, p_st+self.parallel_eq_n)\n cur_w_interval = tmp_w_interval.repeat(p_ed-p_st,1,1,1,1)\n # print(f\"cur_w_interval {cur_w_interval.shape}\")\n cur_w_interval[:,:,:,h:h+1,:] = weight_interval_candidates[man][p_st:p_ed,:,:,h:h+1,:]\n # quantize weight and bias \n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols).unsqueeze(0) # shape: 1,n_V,crb_rows,n_H,crb_cols\n \n if self.w_bit == 8:\n w, cur_w_scale = self.get_w_scale(w_sim, bits = self.w_bit, mantissa_bit= man+2, bias= cur_w_interval)\n else:\n w, cur_w_scale = self.get_w_scale(w_sim, bits = self.w_bit, mantissa_bit= man, bias= cur_w_interval)\n \n w_sim = (w/cur_w_scale).round_().mul_(cur_w_scale) # shape: parallel_eq_n,n_V,crb_rows,n_H,crb_cols\n w_sim = w_sim.view(-1,self.in_features) # shape: parallel_eq_n*oc,ic\n bias_sim = self.bias.repeat(p_ed-p_st) if self.bias is not None else None\n # quantize input\n x_sim = self.quant_input(x)\n # calculate similarity and store them\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: b,*,parallel_eq_n*oc\n out_sim = torch.cat(torch.chunk(out_sim.unsqueeze(-2), chunks=p_ed-p_st, dim=-1), dim=-2) # shape: b,*,parallel_eq_n,oc\n out_sim = torch.cat(torch.chunk(out_sim.unsqueeze(-2), chunks=self.n_V, dim=-1), dim=-2) # shape: b,*,parallel_eq_n,n_V,crb_rows\n if self.metric != \"pearson\":\n similarity = self._get_similarity(raw_out_expanded, out_sim, self.metric, raw_grad) # shape: b,*,parallel_eq_n,n_V\n if len(similarity.shape) > 3:\n similarity = torch.mean(similarity, dim=list(range(1,len(similarity.shape)-2))) # shape: b, parallel_eq_n, n_V\n else:\n similarity = self._get_pearson_w(raw_out_expanded, out_sim)\n similarity = similarity.sum(dim=0, keepdim=True) # shape: 1, parallel_eq_n, n_V\n similarities.append(similarity)\n # store best weight interval of h into tmp_w_interval\n similarities = torch.cat(similarities, dim=1) # shape: 1, eq_n, n_V\n batch_similarities.append(similarities)\n batch_similarities = torch.cat(batch_similarities, dim=0).sum(dim=0, keepdim=False) # shape: eq_n, n_V\n h_best_index = batch_similarities.argmax(dim=0).reshape(1,-1,1,1,1) # shape: 1,n_V,1,1,1\n tmp_w_interval[:,:,:,h:h+1,:] = torch.gather(weight_interval_candidates[man][:,:,:,h:h+1,:],dim=0,index=h_best_index)\n self.w_intervals[man] = tmp_w_interval.squeeze(dim=0)\n\n def _search_best_w_format(self):\n \n # print(f\"before search linear weight E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n \n # format candidate\n if self.w_bit == 8:\n w_mantissa_bits_candidate = [i for i in range(self.w_bit-3)]\n else:\n w_mantissa_bits_candidate = [i for i in range(self.w_bit-1)]\n \n 
batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out = self.raw_out[b_st:b_ed].to(self.weight.device) # shape: b,*,1,oc\n raw_grad = self.raw_grad\n similarities = []\n # quantize input\n x_sim = self.quant_input(x)\n \n for w_mantissa_bit in w_mantissa_bits_candidate:\n if self.w_bit == 8:\n shift_w_mantissa_bit = w_mantissa_bit + 2\n else:\n shift_w_mantissa_bit = w_mantissa_bit\n \n # print(f\"CUR w E{self.w_bit - 1 - shift_w_mantissa_bit}M{shift_w_mantissa_bit}\")\n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols)\n w,cur_w_scale = self.get_w_scale(w_sim, bits = self.w_bit, mantissa_bit= shift_w_mantissa_bit, bias= self.w_intervals[w_mantissa_bit])\n w_sim = (w/cur_w_scale).round_().mul_(cur_w_scale)\n w_sim = w_sim.view(-1,self.in_features)\n bias_sim = self.bias if self.bias is not None else None\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: B,*,oc\n similarity = self._get_similarity(raw_out, out_sim, self.metric, raw_grad) #B,*,oc\n similarity = torch.mean(similarity) # shape: 1\n similarities.append(similarity)\n similarities = torch.tensor(similarities)\n batch_similarities.append(similarities)\n batch_similarities = torch.vstack(batch_similarities)\n best_mantissa_bit = batch_similarities.sum(dim=0, keepdim=True).argmax(dim=1).item()\n\n if self.w_bit == 8:\n self.w_mantissa_bit = torch.tensor(best_mantissa_bit + 2).to(self.weight.device)\n self.w_exponent_bit = torch.tensor(self.w_bit - 1 - self.w_mantissa_bit).to(self.weight.device) \n \n else:\n self.w_mantissa_bit = torch.tensor(best_mantissa_bit).to(self.weight.device)\n self.w_exponent_bit = torch.tensor(self.w_bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n self.w_interval = self.w_intervals[best_mantissa_bit]\n # print(f\"search result E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n # print(\"finish searching fp format for linear weight\")\n\n def _search_best_a_interval(self, input_interval_candidates):\n \n for man in range(input_interval_candidates.shape[0]):\n \n tmp_a_interval = self.a_intervals[man].unsqueeze(-1) # shape: n_a,1,1\n\n for a in range(tmp_a_interval.shape[0]): # the whole tensor only has one scaling factor\n batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out_expanded = self.raw_out[b_st:b_ed].to(self.weight.device).unsqueeze(-2) # shape: b,*,1,oc\n raw_grad = self.raw_grad\n similarities = []\n for p_st in range(0,self.eq_n,self.parallel_eq_n):\n p_ed = min(self.eq_n, p_st+self.parallel_eq_n)\n cur_a_interval = tmp_a_interval.repeat(1,1,p_ed-p_st) # shape: n_a,1,parallel_eq_n\n cur_a_interval[a:a+1,:,:] = input_interval_candidates[man][a:a+1,:,p_st:p_ed]\n # quantize weight and bias \n w_sim, bias_sim = self.quant_weight_bias()\n # quantize input\n x_sim=torch.cat(torch.chunk(x.unsqueeze(-2), chunks=self.n_a, dim=-1), dim=-2).unsqueeze(-1)\n cur_a_bias = self.a_biases[man].unsqueeze(-1)\n \n cur_a_interval_zero_point = torch.round(cur_a_interval)\n # print(f\"cur_a_interval_zero_point {cur_a_interval_zero_point.shape}\")\n # print(f\"cur_a_bias {cur_a_bias.shape}\")\n if 
self.a_bit == 8:\n # print(f\"CUR a E{self.a_bit - 1 - man -2}M{man+2}\")\n cur_a, cur_a_scale = self.get_scale(x_sim, bits = self.a_bit, mantissa_bit= man+2, bias= cur_a_bias,tensor_scale= cur_a_interval,tensor_scale_zero_point=cur_a_interval_zero_point)\n else:\n cur_a, cur_a_scale = self.get_scale(x_sim, bits = self.a_bit, mantissa_bit= man, bias= cur_a_bias,tensor_scale= cur_a_interval,tensor_scale_zero_point=cur_a_interval_zero_point)\n\n x_sim=(cur_a/(cur_a_scale)).round_()*(cur_a_scale) # shape: b,*,n_a,crb_acts,parallel_eq_n\n # print(f\"unique a values{torch.unique(x_sim[0]).shape[0]}\")\n x_sim = x_sim.permute(*list(range(len(x_sim.shape)-3)),-1,-3,-2).reshape(*x.shape[:-1],p_ed-p_st,x.shape[-1]) # shape: b,*,parallel_eq_n,ic\n # print(f\"x_sim {x_sim.shape}\")\n # calculate similarity and store them\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: b,*,parallel_eq_n,oc\n if self.metric != \"pearson\":\n similarity = self._get_similarity(raw_out_expanded, out_sim, self.metric, raw_grad) # shape: b,*,parallel_eq_n\n if len(similarity.shape) > 2:\n similarity = torch.mean(similarity, dim=list(range(1,len(similarity.shape)-1))) # shape: b, parallel_eq_n\n else:\n similarity = self._get_pearson_a(raw_out_expanded, out_sim)\n similarity = torch.sum(similarity, dim=0, keepdim=True) # shape: 1, parallel_eq_n\n \n similarities.append(similarity)\n # store best input interval and store in tmp_a_interval\n similarities = torch.cat(similarities, dim=1) # shape: 1, eq_n\n batch_similarities.append(similarities)\n batch_similarities = torch.cat(batch_similarities, dim=0).sum(dim=0, keepdim=False) # shape: eq_n\n # print(f\"linear similarity {batch_similarities.sum()}\")\n a_best_index = batch_similarities.argmax(dim=0, keepdim=True).reshape(1,1,-1)\n # a_best_index = batch_similarities.argmax(dim=0, keepdim=True)\n # print(f\"a_best_index {a_best_index.shape}\")\n # print(f\"input_interval_candidates[man] {input_interval_candidates[man].shape}\")\n tmp_a_interval[a:a+1,:,:] = torch.gather(input_interval_candidates[man][a:a+1,:,:],dim=2,index=a_best_index)\n \n self.a_intervals[man] = tmp_a_interval.squeeze(-1)\n self.a_intervals_zero_point[man] = torch.round(self.a_intervals[man])\n\n def _search_best_a_format(self):\n \n batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n\n # format candidate\n if self.a_bit == 8:\n a_mantissa_bits_candidate = [i for i in range(self.a_bit-3)]\n else:\n a_mantissa_bits_candidate = [i for i in range(self.a_bit-1)]\n # quantize input\n w_sim, bias_sim = self.quant_weight_bias()\n # print(f\"before search linear activation E{self.a_exponent_bit}M{self.a_mantissa_bit}\")\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out = self.raw_out[b_st:b_ed].to(self.weight.device) # shape: b,*,oc\n raw_grad = self.raw_grad\n similarities = []\n \n for a_mantissa_bit in a_mantissa_bits_candidate:\n if self.a_bit == 8:\n shift_a_mantissa_bit = a_mantissa_bit + 2\n else:\n shift_a_mantissa_bit = a_mantissa_bit\n \n x_sim = torch.cat(torch.chunk(x.unsqueeze(-2), chunks=self.n_a, dim=-1), dim=-2)\n\n cur_a_bias = self.a_biases[a_mantissa_bit]\n cur_a_interval = self.a_intervals[a_mantissa_bit]\n cur_a_interval_zero_point = self.a_intervals_zero_point[a_mantissa_bit]\n cur_a, cur_a_scale = self.get_scale(x_sim, bits = self.a_bit, mantissa_bit= shift_a_mantissa_bit, bias= 
cur_a_bias,tensor_scale= cur_a_interval,tensor_scale_zero_point=cur_a_interval_zero_point)\n \n x_sim=(cur_a/(cur_a_scale)).round_()*(cur_a_scale) # shape: B,*,n_a,crb_acts\n if len(x.shape) == 3:\n x_sim = x_sim.view(x.shape[0],x.shape[1],x.shape[2])\n else:\n x_sim = x_sim.view(x.shape[0],1,x.shape[1])\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: B,*,oc \n if len(raw_out.shape) == 2:\n out_sim = out_sim.view(raw_out.shape[0],raw_out.shape[1])\n similarity = self._get_similarity(raw_out, out_sim, self.metric, raw_grad) #B,*,oc\n similarity = torch.mean(similarity)\n similarities.append(similarity)\n similarities = torch.tensor(similarities)\n batch_similarities.append(similarities)\n \n batch_similarities = torch.vstack(batch_similarities)\n best_mantissa_bit = batch_similarities.sum(dim=0, keepdim=True).argmax(dim=1).item()\n\n if self.a_bit == 8:\n self.a_mantissa_bit = torch.tensor(best_mantissa_bit + 2).to(self.weight.device)\n self.a_exponent_bit = torch.tensor(self.a_bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n else:\n self.a_mantissa_bit = torch.tensor(best_mantissa_bit).to(self.weight.device)\n self.a_exponent_bit = torch.tensor(self.a_bit - 1 - best_mantissa_bit).to(self.weight.device) \n\n self.a_interval = self.a_intervals[best_mantissa_bit]\n self.a_interval_zero_point = self.a_intervals_zero_point[best_mantissa_bit]\n self.a_bias = self.a_biases[best_mantissa_bit]\n # print(f\"search result linear activation E{self.a_exponent_bit}M{self.a_mantissa_bit}\")\n # print(\"finish searching fp format for linear activation\")\n\n def calibration_step2(self):\n \"\"\"\n Only use cached raw inputs/outs/grads\n \"\"\"\n self._initialize_calib_parameters()\n self._initialize_intervals()\n\n # prepare weight intervals and similarities\n weight_interval_candidates = []\n if self.w_bit == 8:\n for m in range(self.w_bit-3):\n weight_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.w_intervals[m].unsqueeze(0)\n weight_interval_candidates.append(weight_interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n else:\n for m in range(self.w_bit-1):\n weight_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.w_intervals[m].unsqueeze(0)\n weight_interval_candidates.append(weight_interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n weight_interval_candidates = torch.vstack(weight_interval_candidates)\n\n input_interval_candidates = []\n if self.a_bit == 8:\n for m in range(self.a_bit-3): \n input_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(1,1,-1) * self.a_intervals[m].unsqueeze(-1)\n input_interval_candidates.append(input_interval_candidate.unsqueeze(0)) # shape: n_a,1,eq_n\n \n else:\n for m in range(self.a_bit-1): \n input_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(1,1,-1) * self.a_intervals[m].unsqueeze(-1)\n input_interval_candidates.append(input_interval_candidate.unsqueeze(0)) # shape: n_a,1,eq_n\n input_interval_candidates = torch.vstack(input_interval_candidates)\n \n \n for e in range(self.search_round):\n # search for best weight interval\n 
self._search_best_w_interval(weight_interval_candidates)\n # search for best input interval\n self._search_best_a_interval(input_interval_candidates)\n # search for best weight format\n self._search_best_w_format()\n # search for best input format\n self._search_best_a_format()\n\n print(f\"final w format E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n # print(f\"final self.w_interval {self.w_interval}\")\n # print(f\"final self.w_interval_zero_point {self.w_interval_zero_point}\")\n print(f\"final a format E{self.a_exponent_bit}M{self.a_mantissa_bit}\")\n # print(f\"final self.a_interval {self.a_interval}\")\n # print(f\"final self.a_interval_zero_point {self.a_interval_zero_point}\")\n self.calibrated = True\n # self._bias_correction_quant_forward(self.raw_input.to(self.weight.device)) # debugging\n del self.raw_input, self.raw_out, self.raw_grad\n return None" }, { "identifier": "FPPTQSLQuantEmbedding_fpq_baseline", "path": "quant_layers/fp_embed.py", "snippet": "class FPPTQSLQuantEmbedding_fpq_baseline(FPPTQSLQuantEmbedding):\n def __init__(self, \n num_embeddings: int,\n embedding_dim: int,\n padding_idx: int,\n mode = \"raw\",\n bit = 8,\n exponent_bit = 4,\n bias_bit = None,\n bias_correction = False,\n metric=\"L2_norm\", search_round=1, eq_alpha=0, eq_beta=1, eq_n=100, parallel_eq_n=1, n_H=1, n_V=1):\n super().__init__(num_embeddings, embedding_dim, padding_idx, mode=mode, bit=bit, exponent_bit= exponent_bit, bias_bit=bias_bit, bias_correction=bias_correction, metric=metric, search_round=search_round, eq_alpha=eq_alpha, eq_beta=eq_beta, eq_n=eq_n, parallel_eq_n=parallel_eq_n, n_H=n_H, n_V=n_V)\n self.maxval = None\n self.intervals = None\n\n def _initialize_intervals_eval(self):\n\n self.n_V = self.num_embeddings\n self.crb_rows = self.num_embeddings // self.n_V\n maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.maxval = maxval\n self.interval=(2**self.exponent_bit - torch.log2(maxval) + math.log2(2 - 2 ** (-self.mantissa_bit)) - 1)\n self.calibrated = True\n\n def _initialize_intervals(self):\n\n self.n_V = self.num_embeddings\n self.crb_rows = self.num_embeddings // self.n_V\n maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.maxval = maxval\n self.interval=(2**self.exponent_bit - torch.log2(maxval) + math.log2(2 - 2 ** (-self.mantissa_bit)) - 1)\n self.intervals = []\n if self.bit == 8: ## need to constrain the exponent as too big exponent bits will result in overflow\n # E7M0, E6M1, E5M2, E4M3, E3M4, E2M5, E1M6, start with E5M2 as E7M0 and E6M1 usually performs quite bad and results in overflow\n for i in range(self.bit-3):\n M = i + 2\n E = self.bit - 1 - M\n self.intervals.append(2**E - torch.log2(self.maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n else:\n for i in range(self.bit-1):\n M = i\n E = self.bit - 1 - M\n self.intervals.append(2**E - torch.log2(self.maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n def _get_similarity(self, tensor_raw, tensor_sim, metric=None):\n \"\"\"\n tensor_raw: *, features\n tensor_sim: *, features\n similarity: *\n It's your job to calculate mean on * dims!\n \"\"\"\n if metric == \"cosine\":\n similarity = F.cosine_similarity(tensor_raw, tensor_sim, dim=-1)\n else:\n if metric == \"L1_norm\":\n similarity = -torch.abs(tensor_raw - tensor_sim)\n elif metric == \"L2_norm\":\n similarity = -(tensor_raw - tensor_sim) ** 2\n elif metric == \"linear_weighted_L2_norm\":\n similarity = -tensor_raw.abs() * (tensor_raw - tensor_sim) 
** 2\n elif metric == \"square_weighted_L2_norm\":\n similarity = -(tensor_raw * (tensor_raw - tensor_sim)) ** 2\n else:\n raise NotImplementedError(f\"metric {metric} not implemented!\")\n similarity = torch.mean(similarity, dim=-1)\n return similarity\n\n def _search_best_interval(self, interval_candidates):\n \n # print(f\"interval_candidates shape {interval_candidates.shape}\")\n for man in range(interval_candidates.shape[0]):\n tmp_interval = self.intervals[man].unsqueeze(0) # shape: 1,n_V,1,n_H,1\n for h in range(self.n_H):\n similarities = []\n for p_st in range(0,self.eq_n,self.parallel_eq_n):\n p_ed = min(self.eq_n, p_st+self.parallel_eq_n)\n cur_w_interval = tmp_interval.repeat(p_ed-p_st,1,1,1,1)\n cur_w_interval[:,:,:,h:h+1,:] = interval_candidates[man][p_st:p_ed,:,:,h:h+1,:]\n # quantize weight and bias \n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols).unsqueeze(0) # shape: 1,n_V,crb_rows,n_H,crb_cols\n \n if self.bit >= 8:\n w, cur_w_scale = self.get_scale(w_sim, bits = self.bit, mantissa_bit= man+2, bias= cur_w_interval)\n else:\n w, cur_w_scale = self.get_scale(w_sim, bits = self.bit, mantissa_bit= man, bias= cur_w_interval)\n\n w_sim = (w/cur_w_scale).round_().mul_(cur_w_scale) # shape: parallel_eq_n,n_V,crb_rows,n_H,crb_cols\n w_sim = w_sim.view(-1,self.num_embeddings,self.embedding_dim) # shape: parallel_eq_n*oc,ic\n \n\n similarity = self._get_similarity(self.weight.unsqueeze(0), w_sim, self.metric) # shape: B,*,parallel_eq_n,n_V\n if self.n_V == 1:\n similarity = similarity.sum(dim=1, keepdim=True)\n \n similarities.append(similarity)\n # store best weight interval of h into tmp_interval\n similarities = torch.cat(similarities, dim=0) # shape: eq_n, n_V\n h_best_index = similarities.argmax(dim=0).reshape(1,-1,1,1,1) # shape: 1,n_V,1,1,1\n tmp_interval[:,:,:,h:h+1,:] = torch.gather(interval_candidates[man][:,:,:,h:h+1,:],dim=0,index=h_best_index)\n self.intervals[man] = tmp_interval.squeeze(dim=0)\n\n def _search_best_format(self):\n \n # print(f\"before search linear weight E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n \n # format candidate\n if self.bit >= 8:\n mantissa_bits_candidate = [i for i in range(self.bit-3)]\n else:\n mantissa_bits_candidate = [i for i in range(self.bit-1)]\n \n similarities = []\n for mantissa_bit in mantissa_bits_candidate:\n if self.bit >= 8:\n shift_mantissa_bit = mantissa_bit + 2\n else:\n shift_mantissa_bit = mantissa_bit\n \n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols)\n w, cur_w_scale = self.get_scale(w_sim, bits = self.bit, mantissa_bit= shift_mantissa_bit, bias= self.intervals[mantissa_bit])\n \n w_sim = (w/cur_w_scale)\n \n w_sim = w_sim.round_().mul_(cur_w_scale)\n\n \n w_sim = w_sim.view(-1,self.num_embeddings,self.embedding_dim)\n\n similarity = self._get_similarity(self.weight.unsqueeze(0), w_sim, self.metric) #B,*,oc\n similarity = torch.mean(similarity) # shape: 1\n similarities.append(similarity)\n similarities = torch.tensor(similarities)\n best_mantissa_bit = similarities.argmax(dim=0).item()\n \n if self.bit >= 8:\n self.mantissa_bit = torch.tensor(best_mantissa_bit + 2).to(self.weight.device)\n self.exponent_bit = torch.tensor(self.bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n else:\n self.mantissa_bit = torch.tensor(best_mantissa_bit).to(self.weight.device) \n self.exponent_bit = torch.tensor(self.bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n self.interval = self.intervals[best_mantissa_bit]\n\n def calibration_step2(self):\n\n 
self._initialize_intervals()\n\n # prepare intervals and similarities\n interval_candidates = []\n if self.bit >=8:\n for m in range(self.bit-3): #m 2 ~ 6\n interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.intervals[m].unsqueeze(0)\n interval_candidates.append(interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n \n else:\n for m in range(self.bit-1): #m 0 ~ 6\n interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.intervals[m].unsqueeze(0)\n interval_candidates.append(interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n interval_candidates = torch.vstack(interval_candidates)\n\n for e in range(self.search_round):\n # search for best weight interval\n self._search_best_interval(interval_candidates)\n # search for best weight format\n self._search_best_format()\n\n print(f\"search format E{self.exponent_bit}M{self.mantissa_bit}\")\n\n self.calibrated = True\n return None" } ]
from quant_layers.fp_linear import FPPTQSLBatchingQuantLinear_fpq
from quant_layers.fp_embed import FPPTQSLQuantEmbedding_fpq_baseline
11,265
bit = 8
exp_bit = 4
embed_name_list = ["qembedding"]
fc_name_list = ["qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o", "qlinear_gate", "qlinear_down", "qlinear_up", "qlinear_score"]
matmul_name_list = ["qmatmul_qk", "qmatmul_scorev"]
w_bit = {name: bit for name in fc_name_list}
a_bit = {name: bit for name in fc_name_list}
embed_bit = {name: bit for name in embed_name_list}
A_bit = {name: bit for name in matmul_name_list}
B_bit = {name: bit for name in matmul_name_list}
w_exp_bit = {name: exp_bit for name in fc_name_list}
a_exp_bit = {name: exp_bit for name in fc_name_list}
embed_exp_bit = {name: exp_bit for name in embed_name_list}
A_exp_bit = {name: exp_bit for name in matmul_name_list}
B_exp_bit = {name: exp_bit for name in matmul_name_list}

ptqsl_embedding_kwargs = {
    "metric": "L2_norm",
    "eq_alpha": 0.01,
    "eq_beta": 1.2,
    "eq_n": 100,
    'search_round': 3,
    "n_V": 1,
    "n_H": 1
}
ptqsl_linear_kwargs = {
    "metric": "L2_norm",
    "eq_alpha": 0.01,
    "eq_beta": 1.2,
    "eq_n": 100,
    'search_round': 3,
    "n_V": 1,
    "n_H": 1,
    "n_a": 1,
    "bias_correction": True  # Conventionally I'll not add an actual bias correction in linear
}

def get_module(module_type, *args, **kwargs):
    if "embedding" in module_type:
        kwargs.update(ptqsl_embedding_kwargs)
        module = FPPTQSLQuantEmbedding_fpq_baseline(*args, **kwargs, bit=embed_bit[module_type], exponent_bit=embed_exp_bit[module_type], padding_idx=0)
    elif "qlinear" in module_type:
        kwargs.update(ptqsl_linear_kwargs)
        if module_type == "qlinear_score":
            kwargs["n_V"] = 1
bit = 8
exp_bit = 4
embed_name_list = ["qembedding"]
fc_name_list = ["qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o", "qlinear_gate", "qlinear_down", "qlinear_up", "qlinear_score"]
matmul_name_list = ["qmatmul_qk", "qmatmul_scorev"]
w_bit = {name: bit for name in fc_name_list}
a_bit = {name: bit for name in fc_name_list}
embed_bit = {name: bit for name in embed_name_list}
A_bit = {name: bit for name in matmul_name_list}
B_bit = {name: bit for name in matmul_name_list}
w_exp_bit = {name: exp_bit for name in fc_name_list}
a_exp_bit = {name: exp_bit for name in fc_name_list}
embed_exp_bit = {name: exp_bit for name in embed_name_list}
A_exp_bit = {name: exp_bit for name in matmul_name_list}
B_exp_bit = {name: exp_bit for name in matmul_name_list}

ptqsl_embedding_kwargs = {
    "metric": "L2_norm",
    "eq_alpha": 0.01,
    "eq_beta": 1.2,
    "eq_n": 100,
    'search_round': 3,
    "n_V": 1,
    "n_H": 1
}
ptqsl_linear_kwargs = {
    "metric": "L2_norm",
    "eq_alpha": 0.01,
    "eq_beta": 1.2,
    "eq_n": 100,
    'search_round': 3,
    "n_V": 1,
    "n_H": 1,
    "n_a": 1,
    "bias_correction": True  # Conventionally I'll not add an actual bias correction in linear
}

def get_module(module_type, *args, **kwargs):
    if "embedding" in module_type:
        kwargs.update(ptqsl_embedding_kwargs)
        module = FPPTQSLQuantEmbedding_fpq_baseline(*args, **kwargs, bit=embed_bit[module_type], exponent_bit=embed_exp_bit[module_type], padding_idx=0)
    elif "qlinear" in module_type:
        kwargs.update(ptqsl_linear_kwargs)
        if module_type == "qlinear_score":
            kwargs["n_V"] = 1
module= FPPTQSLBatchingQuantLinear_fpq(*args,**kwargs,w_bit=w_bit[module_type],a_bit=a_bit[module_type],w_exponent_bit=w_exp_bit[module_type],a_exponent_bit=a_exp_bit[module_type])
0
2023-10-15 06:05:13+00:00
16k
bcmi/libcom
libcom/shadow_generation/source/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "libcom/shadow_generation/source/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "libcom/shadow_generation/source/ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "libcom/shadow_generation/source/ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "libcom/shadow_generation/source/ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "libcom/shadow_generation/source/ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "libcom/shadow_generation/source/ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "libcom/shadow_generation/source/ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "libcom/shadow_generation/source/ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "libcom/shadow_generation/source/ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del 
self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "libcom/shadow_generation/source/ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "libcom/shadow_generation/source/ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "libcom/shadow_generation/source/ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "libcom/shadow_generation/source/ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "libcom/shadow_generation/source/ldm/models/diffusion/ddim.py", "snippet": "class 
DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" }, { "identifier": "PNDMSampler", "path": "libcom/shadow_generation/source/sampler/pndm.py", "snippet": "class PNDMSampler(object):\n def __init__(self, model, **kwargs) -> None:\n super().__init__()\n self.model = model\n self.num_timesteps = model.num_timesteps\n self.scheduler = PNDMScheduler(\n beta_end=0.012,\n beta_schedule='scaled_linear',\n beta_start=0.00085,\n num_train_timesteps=1000,\n set_alpha_to_one=False,\n skip_prk_steps=True,\n steps_offset=1,\n )\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n features_adapter=None,\n append_to_context=None,\n cond_tau=0.4,\n style_cond_tau=1.0,\n input=None,\n strength=0.7,\n **kwargs\n ):\n num_inference_steps = S\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for PNDM sampling is {size}, eta {eta}')\n device = self.model.betas.device\n self.scheduler.set_timesteps(num_inference_steps)\n\n init_timestep = min(int(num_inference_steps * strength), num_inference_steps)\n t_start = max(num_inference_steps - init_timestep, 0)\n timesteps = self.scheduler.timesteps[t_start:]\n num_inference_steps=num_inference_steps - t_start\n\n if input is None:\n image = torch.randn(size, device=device)\n else:\n # add noise to the composite image\n x0 = self.model.encode_first_stage((input).to(self.model.device))\n x0 = self.model.get_first_stage_encoding(x0)\n noise = torch.randn(x0.shape, device=x0.device, dtype=x0.dtype)\n latent_timestep = timesteps[:1].repeat(batch_size)\n image = self.scheduler.add_noise(original_samples=x0, noise=noise, timesteps=latent_timestep)\n\n\n # with tqdm(total=num_inference_steps) as progress_bar:\n for _, t in enumerate(timesteps):\n ts = torch.full((batch_size,), t, device=device)\n image_input = self.scheduler.scale_model_input(image, t)\n residual = self.model.apply_model(image_input, \n ts, \n conditioning)\n image = self.scheduler.step(residual, t, image).prev_sample\n # progress_bar.update()\n\n return image, 0" }, { "identifier": "latent_guidance_predictor", "path": "libcom/shadow_generation/source/ldm/models/mask_predictor/mask_predictor.py", "snippet": "class latent_guidance_predictor(nn.Module):\n def __init__(self, output_chan, input_chan, num_encodings):\n super(latent_guidance_predictor, self).__init__()\n self.num_encodings = num_encodings\n \n self.layers = nn.Sequential(\n nn.Conv2d(input_chan, 4, 3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(num_features=4),\n nn.Flatten(start_dim=1, end_dim=3),\n nn.Linear(64*64*4, 512),\n nn.ReLU(),\n nn.BatchNorm1d(num_features=512),\n nn.Linear(512, 256), \n nn.ReLU(),\n nn.BatchNorm1d(num_features=256),\n nn.Linear(256, 128), \n nn.ReLU(),\n nn.BatchNorm1d(num_features=128),\n nn.Linear(128, 64), \n nn.ReLU(),\n nn.BatchNorm1d(num_features=64),\n nn.Linear(64, output_chan*64*64)\n )\n\n def init_weights(self, init_type='normal', gain=0.02):\n def init_func(m):\n classname = 
m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if init_type == 'normal':\n nn.init.normal_(m.weight.data, 0.0, gain)\n elif init_type == 'xavier':\n nn.init.xavier_normal_(m.weight.data, gain=gain)\n elif init_type == 'kaiming':\n nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n nn.init.orthogonal_(m.weight.data, gain=gain)\n\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.constant_(m.bias.data, 0.0)\n\n elif classname.find('BatchNorm2d') != -1:\n nn.init.normal_(m.weight.data, 1.0, gain)\n nn.init.constant_(m.bias.data, 0.0)\n\n self.apply(init_func)\n\n def forward(self, x):\n # Concatenate input pixels with noise level t and positional encodings\n # pos_encoding = [torch.sin(2 * math.pi * t * (2 **-l)) for l in range(self.num_encodings)]\n # pos_encoding = torch.cat(pos_encoding, dim=-1)\n # x = torch.cat((x, t, pos_encoding), dim=-1)\n # x = x.flatten(start_dim=1, end_dim=3)\n \n return self.layers(x)" } ]
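The context snippets above (make_beta_schedule, extract_into_tensor, noise_like) are the pieces the diffusion code further down combines into the closed-form forward step q(x_t | x_0) = N(sqrt(alpha_bar_t) x_0, (1 - alpha_bar_t) I). A minimal, self-contained sketch of that combination; note the two helpers are restated locally here (an assumption, so the sketch runs without the libcom package):

import numpy as np
import torch

def make_linear_betas(n_timestep, linear_start=1e-4, linear_end=2e-2):
    # "linear" branch of make_beta_schedule above: squared linspace between sqrt endpoints
    return (torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2).numpy()

def extract_into_tensor(a, t, x_shape):
    # gather one coefficient per sample and reshape so it broadcasts over the image dims
    b = t.shape[0]
    return a.gather(-1, t).reshape(b, *((1,) * (len(x_shape) - 1)))

betas = make_linear_betas(1000)
alphas_cumprod = torch.tensor(np.cumprod(1. - betas), dtype=torch.float32)

x0 = torch.randn(4, 3, 64, 64)                   # clean inputs
t = torch.randint(0, 1000, (4,))                 # one timestep per sample
noise = torch.randn_like(x0)
x_t = (extract_into_tensor(alphas_cumprod.sqrt(), t, x0.shape) * x0
       + extract_into_tensor((1. - alphas_cumprod).sqrt(), t, x0.shape) * noise)
print(x_t.shape)  # torch.Size([4, 3, 64, 64]) -- same recipe as DDPM.q_sample below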
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities import rank_zero_only
from omegaconf import ListConfig
from libcom.shadow_generation.source.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from libcom.shadow_generation.source.ldm.modules.ema import LitEma
from libcom.shadow_generation.source.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from libcom.shadow_generation.source.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from libcom.shadow_generation.source.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from libcom.shadow_generation.source.ldm.models.diffusion.ddim import DDIMSampler
from libcom.shadow_generation.source.sampler.pndm import PNDMSampler
from libcom.shadow_generation.source.ldm.models.mask_predictor.mask_predictor import latent_guidance_predictor
13,913
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, mode, ddim_steps, input, add_noise_strength, **kwargs): if mode == 'ddim': ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) elif mode == 'pndm': pndm_sampler = PNDMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = pndm_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, input=input, strength=add_noise_strength, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, mode='ddim', input=None, add_noise_strength=1, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, 
t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim_steps=ddim_steps, eta=ddim_eta, mode=mode, input=input, add_noise_strength=add_noise_strength) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid
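The cropped code dispatches mode='ddim' to DDIMSampler (see the context snippet above), whose p_sample_ddim boils down to a single closed-form update. A minimal sketch of that update under the eps parameterization; a_t and a_prev are placeholder cumulative-alpha values (assumptions), not the real ddim_alphas schedule:

import torch

def ddim_step(x_t, eps, a_t, a_prev, sigma_t=0.0):
    # pred_x0, the "direction pointing to x_t" and the noise term mirror p_sample_ddim above
    pred_x0 = (x_t - (1. - a_t) ** 0.5 * eps) / a_t ** 0.5
    dir_xt = (1. - a_prev - sigma_t ** 2) ** 0.5 * eps
    noise = sigma_t * torch.randn_like(x_t)      # zero when eta == 0 (deterministic DDIM)
    return a_prev ** 0.5 * pred_x0 + dir_xt + noise

x_t = torch.randn(1, 4, 64, 64)                  # noisy latent at the current step
eps = torch.randn_like(x_t)                      # stand-in for self.model.apply_model(x, t, c)
x_prev = ddim_step(x_t, eps, a_t=0.5, a_prev=0.6)
print(x_prev.shape)                              # torch.Size([1, 4, 64, 64])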
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mask=None, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") if mask is not None: ratio = torch.sum(torch.greater_equal(mask, 0.6), dim=(1,2)) / (mask.shape[1] * mask.shape[2]) scale = torch.clamp((1/ratio).type(torch.int32), 2, 19) mask = mask * scale[:, None, None] + 1 mask = mask[:, None, :, :] loss *= mask return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == 
img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): 
self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): #TODO # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if 
isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out def decode_first_stage_with_grad(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c, mask= self.get_input(batch, self.first_stage_key) loss = self(x, c, mask) return loss def forward(self, x, c, mask=None, *args, **kwargs): train_mask_only = kwargs.pop('train_mask_only', False) if train_mask_only: t = torch.randint(0, int(0.3 * self.num_timesteps), (x.shape[0],), device=self.device).long() else: t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, mask=mask, train_mask_only=train_mask_only, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. 
""" batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, mask=None, noise=None, train_mask_only=False): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False, mask=mask).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) if train_mask_only: pred_x0 = self.predict_start_from_noise(x_t=x_noisy, t=t, noise=model_output) return loss, loss_dict, pred_x0 return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, mode, ddim_steps, input, add_noise_strength, **kwargs): if mode == 'ddim': ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) elif mode == 'pndm': pndm_sampler = PNDMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = pndm_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, input=input, strength=add_noise_strength, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = 
self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, mode='ddim', input=None, add_noise_strength=1, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim_steps=ddim_steps, eta=ddim_eta, mode=mode, input=input, add_noise_strength=add_noise_strength) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid
if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
12
2023-10-19 05:08:12+00:00
16k
e4s2023/E4S2023
training/coach.py
[ { "identifier": "torch_utils", "path": "utils/torch_utils.py", "snippet": "def saveTensorToFile(tensor, save_path):\ndef interpolate(img, size):\ndef readImgAsTensor(img_path, gray=False, to_tensor=True, size=1024):\ndef featMap2im(var):\ndef tensor2im(var, is_zero_center: bool = True, ):\ndef im2tensor(var, add_c_dim: bool = False, norm: bool = True, std: bool = False):\ndef tensor2map(var,shown_mask_indices=None):\ndef vis_mask_in_color(mask):\ndef get_colors():\ndef vis_faces(log_hooks1):\ndef vis_faces_no_id(hooks_dict1, fig, gs, i):\ndef aggregate_loss_dict(agg_loss_dict):\ndef labelMap2OneHot(label, num_cls):\ndef remove_module_prefix(state_dict,prefix):\ndef requires_grad(model, flag=True):\ndef accumulate(model1, model2, decay=0.999):\n C, H, W = tensor.size()" }, { "identifier": "CelebAHQDataset", "path": "datasets/dataset.py", "snippet": "class CelebAHQDataset(Dataset):\n \"\"\"\n CelebA-HQ数据集,具体数据来自于 https://github.com/ZPdesu/SEAN\n \"\"\"\n def __init__(self, dataset_root, mode=\"test\",\n img_transform=TO_TENSOR, label_transform=TO_TENSOR,\n load_vis_img=False, fraction=1.0,\n flip_p=-1, # negative means not flipping\n specific_ids: Union[list, tuple] = None,\n paired: bool = False,\n shuffle: bool = False,\n ):\n assert mode in (\"train\", \"test\", \"all\"), \"CelebAHQDataset mode type unsupported!\"\n self.mode = mode\n if mode in (\"all\",):\n self.roots = [osp.join(dataset_root, \"train\"), osp.join(dataset_root, \"test\")]\n else:\n self.roots = [osp.join(dataset_root, self.mode)]\n self.img_transform = img_transform\n self.label_transform = label_transform\n self.load_vis_img = load_vis_img\n self.fraction = fraction\n self.flip_p = flip_p\n self.paired = paired\n\n self.imgs = []\n self.labels = []\n self.labels_vis = []\n for root in self.roots:\n imgs = sorted(make_dataset(osp.join(root, \"images\")))\n imgs = imgs[:int(len(imgs)*self.fraction)]\n\n labels = sorted(make_dataset(osp.join(root, \"labels\")))\n labels = labels[:int(len(labels)*self.fraction)]\n\n labels_vis = sorted(make_dataset(osp.join(root, \"vis\"))) if self.load_vis_img else None\n labels_vis = labels_vis[:int(len(labels_vis)*self.fraction)] if self.load_vis_img else []\n\n self.imgs.extend(imgs)\n self.labels.extend(labels)\n self.labels_vis.extend(labels_vis)\n\n self.imgs, self.labels, self.labels_vis = self._filter_specific_ids(specific_ids)\n\n if self.load_vis_img:\n assert len(self.imgs) == len(self.labels) == len(self.labels_vis)\n else:\n assert len(self.imgs) == len(self.labels)\n\n print(f\"[CelebAHQDataset] files loaded. mode={self.mode}, #imgs={len(self.imgs)}, \"\n f\"#labels={len(self.labels)}, #vis={len(self.labels_vis)}\")\n\n # # 优化 600 个iteration 的style code保存路径\n # self.optim_codes_dir = \"/apdcephfs/share_1290939/zhianliu/py_projects/pytorch-DDP-demo/work_dirs/v0_8_stage2_entypeSEAN/optim_Results\"\n \n # image pairs indices\n self.indices = np.arange(len(self.imgs))\n\n # TODO: shuffle the indices\n if shuffle:\n np.random.shuffle(self.indices)\n\n self.pair_indices = self.indices.reshape(-1, 2)\n\n def __len__(self):\n if not self.paired:\n return len(self.indices)\n else:\n return len(self.pair_indices)\n\n def _filter_specific_ids(self, specific_ids: tuple):\n \"\"\" filter the images according to the specific_ids\n \"\"\"\n if specific_ids is None:\n return self.imgs, self.labels, self.labels_vis\n elif self.fraction < 1.0:\n raise ValueError(\"[CelebAHQDataset] specific_ids and fraction cannot be set simultaneously!\")\n\n # parse the tuple into two lists, e.g. 
((\"train\",\"12\"), (\"test\",\"45\")) -> (\"train\",\"train\") and (\"12\",\"45\")\n spec_modes, spec_ids = [], []\n id_order_dict = {}\n for idx, spec_id in enumerate(specific_ids):\n one_mode, one_id = spec_id[0], spec_id[1]\n spec_modes.append(one_mode)\n spec_ids.append(one_id)\n id_order_dict[one_id] = {\n \"mode\": one_mode, \"order\": idx,\n }\n\n # filter and re-order\n ret_imgs = [\"\"] * len(specific_ids)\n ret_labels = [\"\"] * len(specific_ids)\n ret_labels_vis = [\"\"] * len(specific_ids)\n found_cnt = 0\n for k in range(len(spec_ids)): # target specific ids\n one_spec_mode = spec_modes[k]\n one_spec_id = spec_ids[k]\n for idx in range(len(self.imgs)): # full dataset\n one_img = self.imgs[idx]\n one_label = self.labels[idx]\n one_label_vis = self.labels_vis[idx] if self.load_vis_img else None\n if one_spec_mode in one_img and one_spec_id == osp.basename(one_img): # found one\n found_cnt += 1\n one_spec_order = id_order_dict[one_spec_id][\"order\"]\n ret_imgs[one_spec_order] = one_img\n ret_labels[one_spec_order] = one_label\n ret_labels_vis[one_spec_order] = one_label_vis\n break\n\n if found_cnt < len(specific_ids):\n print(f\"[[Warning]][CelebAHQDataset] not enough images found (={found_cnt}) for \"\n f\"specific ids (={len(specific_ids)})!\")\n\n ret_imgs = list(filter(None, ret_imgs))\n ret_labels = list(filter(None, ret_labels))\n ret_labels_vis = list(filter(None, ret_labels_vis))\n return ret_imgs, ret_labels, ret_labels_vis\n\n def load_single_image(self, index):\n \"\"\"把一张图片的 原图, seg mask, 以及mask对应可视化的图都加载进来\n Args:\n index (int): 图片的索引\n Return:\n img: RGB图\n label: seg mask\n label_vis: seg mask的可视化图\n \"\"\"\n img_path = self.imgs[index]\n img = Image.open(img_path).convert('RGB')\n if self.img_transform is not None:\n img = self.img_transform(img)\n\n label = self.labels[index]\n # label = osp.join(\"/apdcephfs/share_1290939/zhianliu/py_projects/our_editing/ui_results\",\"%s_mask.png\"%osp.basename(label)[:-4])\n label = Image.open(label).convert('L')\n if self.label_transform is not None:\n label = self.label_transform(label)\n\n if self.load_vis_img:\n label_vis = self.labels_vis[index]\n label_vis = Image.open(label_vis).convert('RGB')\n label_vis = TO_TENSOR(label_vis)\n else:\n label_vis = -1 # unified interface\n return img, label, label_vis, img_path\n\n def _output_item(self, idx):\n if not self.paired:\n index = self.indices[idx]\n img, label, label_vis, img_path = self.load_single_image(index)\n if self.flip_p > 0:\n if random.random() < self.flip_p:\n img = TF.hflip(img)\n label = TF.hflip(label)\n return img, label, label_vis, img_path\n else:\n index1 = self.indices[idx * 2]\n index2 = self.indices[idx * 2 + 1]\n img1, label1, label_vis1, img_path1 = self.load_single_image(index1)\n img2, label2, label_vis2, img_path2 = self.load_single_image(index2)\n if self.flip_p > 0:\n if random.random() < self.flip_p:\n img1 = TF.hflip(img1)\n label1 = TF.hflip(label1)\n if random.random() < self.flip_p:\n img2 = TF.hflip(img2)\n label2 = TF.hflip(label2)\n return {\n \"bag1\": (img1, label1, label_vis1, img_path1),\n \"bag2\": (img2, label2, label_vis2, img_path2)\n }\n\n def __getitem__(self, idx):\n return self._output_item(idx)\n \n # # 1阶段重建的图片\n # img_name = osp.basename(self.imgs[index])[:-4]\n # recon_img = Image.open(osp.join(self.optim_codes_dir,img_name,\"%s_recon.png\"%img_name)).convert('RGB')\n # if self.img_transform is not None:\n # recon_img = self.img_transform(recon_img)\n \n # # 优化后的code\n # optim_code_path = 
osp.join(self.optim_codes_dir,img_name,\"%s_0600.npy\"%img_name)\n # assert osp.exists(optim_code_path), \"%s 文件不存在!\"%optim_code_path\n # optimed_style_code = np.load(optim_code_path)[0]\n \n # return img, recon_img, optimed_style_code, label, label_vis\n \n # pair_indices = self.pair_indices[idx, :]\n\n # img1, label1, label_vis1 = self.load_single_image(pair_indices[0])\n # img2, label2, label_vis2 = self.load_single_image(pair_indices[1])\n\n # return (img1, img2), (label1, label2), (label_vis1, label_vis2)" }, { "identifier": "get_transforms", "path": "datasets/dataset.py", "snippet": "def get_transforms(normalize=True, toTensor=True):\n transform_list = []\n if toTensor:\n transform_list += [transforms.ToTensor()]\n\n if normalize:\n transform_list += [transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5))]\n return transforms.Compose(transform_list)" }, { "identifier": "TO_TENSOR", "path": "datasets/dataset.py", "snippet": "TO_TENSOR = transforms.ToTensor()" }, { "identifier": "NORMALIZE", "path": "datasets/dataset.py", "snippet": "NORMALIZE = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))" }, { "identifier": "MASK_CONVERT_TF", "path": "datasets/dataset.py", "snippet": "MASK_CONVERT_TF = transforms.Lambda(\n lambda celebAHQ_mask: __celebAHQ_masks_to_faceParser_mask(celebAHQ_mask))" }, { "identifier": "FFHQDataset", "path": "datasets/dataset.py", "snippet": "class FFHQDataset(Dataset):\n \"\"\"\n FFHQ数据集,提取 mask 的方式参照了Babershop,用的是BiSegNet提取的\n \"\"\"\n\n def __init__(self, dataset_root,\n img_transform=TO_TENSOR, label_transform=TO_TENSOR,\n fraction=1.0,\n load_raw_label=False,\n flip_p = -1):\n\n self.root = dataset_root\n self.img_transform = img_transform\n self.label_transform = label_transform\n self.fraction=fraction\n self.load_raw_label = load_raw_label\n self.flip_p = flip_p\n \n with open(osp.join(self.root,\"images_1024\",\"ffhq_list.txt\"),\"r\") as f:\n f_lines = f.readlines()\n \n self.imgs = sorted([osp.join(self.root, \"images_1024\", line.replace(\"\\n\",\"\")) for line in f_lines])\n self.imgs = self.imgs[:int(len(self.imgs)*self.fraction)]\n self.labels = [img.replace(\"images_1024\",\"BiSeNet_mask\") for img in self.imgs]\n \n assert len(self.imgs) == len(self.labels)\n \n self.indices = np.arange(len(self.imgs))\n\n def __len__(self):\n return len(self.indices)\n\n def load_single_image(self, index):\n \"\"\"把一张图片的 原图, seg mask, 以及mask对应可视化的图都加载进来\n\n Args:\n index (int): 图片的索引\n Return:\n img: RGB图\n label: seg mask\n label_vis: seg mask的可视化图\n \"\"\"\n img = self.imgs[index]\n img = Image.open(img).convert('RGB')\n if self.img_transform is not None:\n img = self.img_transform(img)\n\n label = self.labels[index]\n label = Image.open(label).convert('L')\n \n if self.load_raw_label:\n original_label = TO_TENSOR(label)\n \n if self.label_transform is not None:\n label = self.label_transform(label)\n\n label_vis = -1 # unified interface\n \n if self.load_raw_label:\n return img, original_label, label, label_vis\n else:\n return img, label, label_vis\n \n def __getitem__(self, idx):\n index = self.indices[idx]\n\n img, label, label_vis = self.load_single_image(index)\n \n if self.flip_p > 0:\n if random.random() < self.flip_p:\n img = TF.hflip(img)\n label = TF.hflip(label)\n \n return img, label, label_vis " }, { "identifier": "FFHQ_MASK_CONVERT_TF", "path": "datasets/dataset.py", "snippet": "FFHQ_MASK_CONVERT_TF = transforms.Lambda(\n lambda mask: __ffhq_masks_to_faceParser_mask(mask))" }, { "identifier": "MASK_CONVERT_TF_DETAILED", "path": 
"datasets/dataset.py", "snippet": "MASK_CONVERT_TF_DETAILED = transforms.Lambda(\n lambda celebAHQ_mask: __celebAHQ_masks_to_faceParser_mask_detailed(celebAHQ_mask))" }, { "identifier": "FFHQ_MASK_CONVERT_TF_DETAILED", "path": "datasets/dataset.py", "snippet": "FFHQ_MASK_CONVERT_TF_DETAILED = transforms.Lambda(\n lambda mask: __ffhq_masks_to_faceParser_mask_detailed(mask))" }, { "identifier": "WNormLoss", "path": "criteria/w_norm.py", "snippet": "class WNormLoss(nn.Module):\n\n\tdef __init__(self, start_from_latent_avg=True):\n\t\tsuper(WNormLoss, self).__init__()\n\t\tself.start_from_latent_avg = start_from_latent_avg\n\n\tdef forward(self, latent, latent_avg=None):\n\t\tif self.start_from_latent_avg:\n\t\t\tlatent = latent - latent_avg\n\t\treturn torch.sum(latent.norm(2, dim=(2, 3))) / (latent.shape[0]*latent.shape[1])" }, { "identifier": "IDLoss", "path": "criteria/id_loss.py", "snippet": "class IDLoss(nn.Module):\n def __init__(self,opts):\n super(IDLoss, self).__init__()\n print('Loading ResNet ArcFace')\n self.opts = opts \n \n self.face_pool_1 = torch.nn.AdaptiveAvgPool2d((256, 256))\n self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se')\n self.facenet.load_state_dict(torch.load(opts.ir_se50_path))\n self.face_pool_2 = torch.nn.AdaptiveAvgPool2d((112, 112))\n self.facenet.eval()\n \n self.set_requires_grad(False)\n \n def set_requires_grad(self, flag=True):\n for p in self.parameters():\n p.requires_grad = flag\n \n def extract_feats(self, x):\n x = self.face_pool_1(x) if x.shape[2]!=256 else x # (1) resize to 256 if needed\n x = x[:, :, 35:223, 32:220] # (2) Crop interesting region\n x = self.face_pool_2(x) # (3) resize to 112 to fit pre-trained model\n x_feats = self.facenet(x, multi_scale=self.opts.id_loss_multiscale)\n return x_feats\n\n def forward(self, y_hat, y):\n n_samples = y.shape[0]\n y_feats_ms = self.extract_feats(y) # Otherwise use the feature from there\n y_hat_feats_ms = self.extract_feats(y_hat)\n y_feats_ms = [y_f.detach() for y_f in y_feats_ms] # 各个层的特征\n \n loss_all = 0\n sim_improvement_all = 0\n # 不同尺度\n for y_hat_feats, y_feats in zip(y_hat_feats_ms, y_feats_ms):\n \n loss = 0\n sim_improvement = 0\n count = 0\n # 不同的sample\n for i in range(n_samples):\n sim_target = y_hat_feats[i].dot(y_feats[i])\n sim_views = y_feats[i].dot(y_feats[i])\n \n loss += 1 - sim_target # id loss\n sim_improvement += float(sim_target) - float(sim_views)\n count += 1\n \n loss_all += loss / count\n sim_improvement_all += sim_improvement / count\n \n return loss_all, sim_improvement_all, None" }, { "identifier": "FaceParsingLoss", "path": "criteria/face_parsing/face_parsing_loss.py", "snippet": "class FaceParsingLoss(nn.Module):\n def __init__(self,opts):\n super(FaceParsingLoss, self).__init__()\n print('Loading Face Parsing Net')\n \n self.opts = opts\n self.face_pool = torch.nn.AdaptiveAvgPool2d((512, 512))\n \n self.G = unet()\n self.G.load_state_dict(torch.load(opts.face_parsing_model_path))\n self.G.eval()\n \n self.set_requires_grad(False)\n \n def set_requires_grad(self, flag=True):\n for p in self.parameters():\n p.requires_grad = flag\n \n\n def inference(self, x):\n x = self.face_pool(x) if x.shape[2]!=512 else x # resize to 512 if needed\n labels_predict = self.G(x)\n \n labels_predict_plain = generate_label_plain(labels_predict,imsize=512) # np.array [N,H,W]\n labels_predict_color = generate_label(labels_predict,imsize=512) # torch.Tensor [N,3,H,W]\n \n return labels_predict_plain, labels_predict_color\n \n def extract_feats(self, x):\n 
x = self.face_pool(x) if x.shape[2]!=512 else x # resize to 512 if needed\n x_feats = self.G.extract_feats(x)\n return x_feats\n\n def forward(self, y_hat, y):\n n_samples = y.shape[0]\n y_feats_ms = self.extract_feats(y) # Otherwise use the feature from there\n y_hat_feats_ms = self.extract_feats(y_hat)\n y_feats_ms = [y_f.detach() for y_f in y_feats_ms] # 各个层的特征\n \n loss_all = 0\n sim_improvement_all = 0\n # 不同尺度\n for y_hat_feats, y_feats in zip(y_hat_feats_ms, y_feats_ms):\n loss = 0\n sim_improvement = 0\n count = 0\n # 不同的sample\n for i in range(n_samples):\n sim_target = y_hat_feats[i].dot(y_feats[i])\n sim_views = y_feats[i].dot(y_feats[i])\n \n loss += 1 - sim_target # id loss\n sim_improvement += float(sim_target) - float(sim_views)\n count += 1\n \n loss_all += loss / count\n sim_improvement_all += sim_improvement / count\n \n return loss_all, sim_improvement_all" }, { "identifier": "LPIPS", "path": "criteria/lpips/lpips.py", "snippet": "class LPIPS(nn.Module):\n r\"\"\"Creates a criterion that measures\n Learned Perceptual Image Patch Similarity (LPIPS).\n Arguments:\n net_type (str): the network type to compare the features:\n 'alex' | 'squeeze' | 'vgg'. Default: 'alex'.\n version (str): the version of LPIPS. Default: 0.1.\n \"\"\"\n def __init__(self, net_type: str = 'alex', version: str = '0.1'):\n\n assert version in ['0.1'], 'v0.1 is only supported now'\n\n super(LPIPS, self).__init__()\n\n # pretrained network\n self.net = get_network(net_type)\n\n # linear layers\n self.lin = LinLayers(self.net.n_channels_list)\n self.lin.load_state_dict(get_state_dict(net_type, version))\n\n def forward(self, x: torch.Tensor, y: torch.Tensor):\n feat_x, feat_y = self.net(x), self.net(y)\n\n diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)]\n res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)]\n\n return torch.sum(torch.cat(res, 0)) / x.shape[0]" }, { "identifier": "AdvDLoss", "path": "criteria/adv_loss.py", "snippet": "class AdvDLoss(nn.Module):\n\n\tdef __init__(self):\n\t\tsuper(AdvDLoss, self).__init__()\n\n\tdef forward(self, real_pred, fake_pred):\n\t\treal_loss = F.softplus(-real_pred)\n\t\tfake_loss = F.softplus(fake_pred)\n\t\treturn real_loss.mean() + fake_loss.mean()" }, { "identifier": "AdvGLoss", "path": "criteria/adv_loss.py", "snippet": "class AdvGLoss(nn.Module):\n\n\tdef __init__(self):\n\t\tsuper(AdvGLoss, self).__init__()\n\n\tdef forward(self, fake_pred):\n\t\tloss = F.softplus(-fake_pred).mean()\n\t\treturn loss" }, { "identifier": "DR1Loss", "path": "criteria/adv_loss.py", "snippet": "class DR1Loss(nn.Module):\n def __init__(self):\n super(DR1Loss, self).__init__()\n\n def forward(self,real_pred, real_img):\n with conv2d_gradfix.no_weight_gradients():\n grad_real, = autograd.grad(\n outputs=real_pred.sum(), inputs=real_img, create_graph=True\n )\n grad_penalty = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean()\n\n return grad_penalty" }, { "identifier": "GPathRegularizer", "path": "criteria/adv_loss.py", "snippet": "class GPathRegularizer(nn.Module):\n def __init__(self):\n super(GPathRegularizer, self).__init__()\n \n def forward(self, fake_img, latents, mean_path_length, decay=0.01):\n noise = torch.randn_like(fake_img) / math.sqrt(\n fake_img.shape[2] * fake_img.shape[3]\n )\n grad, = autograd.grad(\n outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True\n )\n path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))\n\n path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)\n\n path_penalty = 
(path_lengths - path_mean).pow(2).mean()\n\n return path_penalty, path_mean.detach(), path_lengths" }, { "identifier": "StyleLoss", "path": "criteria/style_loss.py", "snippet": "class StyleLoss(nn.Module):\n def __init__(self, VGG16_ACTIVATIONS_LIST=[21], normalize=False, distance=\"l2\", in_size=256):\n\n super(StyleLoss, self).__init__()\n\n self.vgg16_act = VGG16_Activations(VGG16_ACTIVATIONS_LIST)\n self.vgg16_act.eval()\n\n ## ===== 修改 =====\n self.in_size = in_size\n # self.upsample2d = nn.Upsample(scale_factor=256 / in_size, mode=\"bilinear\", align_corners=True)\n ## ================\n \n self.normalize = normalize\n self.distance = distance\n\n def normalize_img(self, x):\n \"\"\"\n 将x的范围变到 适配 VGG 输入范围 \n \n https://pytorch.org/vision/stable/models.html\n \n x: [bs,3,H,W] 假设范围是 [-1,1]\n \"\"\"\n x = (x + 1) / 2\n \n mean = torch.from_numpy(VGG_MEAN).view(1,3,1,1).to(x.device)\n std = torch.from_numpy(VGG_STD).view(1,3,1,1).to(x.device)\n \n x = (x - mean) / std\n \n return x\n \n def forward(self, x, x_hat, mask_x=None, mask_x_hat=None):\n # x = x.cuda()\n # x_hat = x_hat.cuda()\n # resize images to 256px resolution\n \n N, C, H, W = x.shape\n \n # x = self.upsample2d(x)\n # x_hat = self.upsample2d(x_hat)\n \n x = F.interpolate(x, size=(256,256), mode=\"bilinear\")\n x_hat = F.interpolate(x_hat, size=(256,256), mode=\"bilinear\")\n\n if self.normalize:\n x = self.normalize_img(x)\n x_hat = self.normalize_img(x_hat)\n \n loss = self.cal_style(self.vgg16_act, x, x_hat, mask_x=mask_x, mask_x_hat=mask_x_hat)\n\n return loss\n\n def cal_style(self, model, x, x_hat, mask_x=None, mask_x_hat=None):\n # Get features from the model for x and x_hat\n \n # with torch.no_grad():\n # act_x = self.get_features(model, x)\n # for layer in range(0, len(act_x)):\n # act_x[layer].detach_()\n \n # mask 图片\n if mask_x is not None:\n assert mask_x_hat is not None, \"mask_x_hat 必须存在!\"\n H, W = x.size(2), x.size(3)\n mask_x = F.interpolate(mask_x, size=(H,W),mode=\"bilinear\")\n x = x * mask_x\n \n mask_x_hat = F.interpolate(mask_x_hat, size=(H,W),mode=\"bilinear\")\n x_hat = x_hat * mask_x_hat\n\n act_x = self.get_features(model, x)\n act_x_hat = self.get_features(model, x_hat)\n\n loss = 0.0\n for layer in range(0, len(act_x)):\n # # mask features if present\n # if mask_x is not None:\n # feat_x = self.mask_features(act_x[layer], mask_x)\n # else:\n # feat_x = act_x[layer]\n \n # if mask_x_hat is not None:\n # feat_x_hat = self.mask_features(act_x_hat[layer], mask_x_hat)\n # else:\n # feat_x_hat = act_x_hat[layer]\n \n feat_x = act_x[layer]\n feat_x_hat = act_x_hat[layer]\n\n \"\"\" 可视化 feature maps\n import ipdb; ipdb.set_trace()\n fx = feat_x[0, ...].detach().cpu().numpy()\n fx = (fx - fx.min()) / (fx.max() - fx.min())\n fx = fx * 255.\n fxhat = feat_x_hat[0, ...].detach().cpu().numpy()\n fxhat = (fxhat - fxhat.min()) / (fxhat.max() - fxhat.min())\n fxhat = fxhat * 255\n from PIL import Image\n import numpy as np\n for idx, img in enumerate(fx):\n img = fx[idx, ...]\n img = img.astype(np.uint8)\n img = Image.fromarray(img)\n img.save('plot/feat_x/{}.png'.format(str(idx)))\n img = fxhat[idx, ...]\n img = img.astype(np.uint8)\n img = Image.fromarray(img)\n img.save('plot/feat_x_hat/{}.png'.format(str(idx)))\n import ipdb; ipdb.set_trace()\n \"\"\"\n\n # compute Gram matrix for x and x_hat\n G_x = self.gram_matrix(feat_x)\n G_x_hat = self.gram_matrix(feat_x_hat)\n\n # compute layer wise loss and aggregate\n loss += custom_loss(\n G_x, G_x_hat, mask=None, loss_type=self.distance, include_bkgd=True\n 
)\n\n loss = loss / len(act_x)\n\n return loss\n\n def get_features(self, model, x):\n\n return model(x)\n\n def mask_features(self, x, mask):\n\n mask = prepare_mask(x, mask)\n return x * mask\n\n def gram_matrix(self, x):\n \"\"\"\n :x is an activation tensor\n \"\"\"\n N, C, H, W = x.shape\n x = x.view(N * C, H * W)\n G = torch.mm(x, x.t())\n\n return G.div(N * H * W * C)" }, { "identifier": "Ranger", "path": "training/ranger.py", "snippet": "class Ranger(Optimizer):\n\n\tdef __init__(self, params, lr=1e-3, # lr\n\t\t\t\t alpha=0.5, k=6, N_sma_threshhold=5, # Ranger options\n\t\t\t\t betas=(.95, 0.999), eps=1e-5, weight_decay=0, # Adam options\n\t\t\t\t use_gc=True, gc_conv_only=False\n\t\t\t\t # Gradient centralization on or off, applied to conv layers only or conv + fc layers\n\t\t\t\t ):\n\n\t\t# parameter checks\n\t\tif not 0.0 <= alpha <= 1.0:\n\t\t\traise ValueError(f'Invalid slow update rate: {alpha}')\n\t\tif not 1 <= k:\n\t\t\traise ValueError(f'Invalid lookahead steps: {k}')\n\t\tif not lr > 0:\n\t\t\traise ValueError(f'Invalid Learning Rate: {lr}')\n\t\tif not eps > 0:\n\t\t\traise ValueError(f'Invalid eps: {eps}')\n\n\t\t# parameter comments:\n\t\t# beta1 (momentum) of .95 seems to work better than .90...\n\t\t# N_sma_threshold of 5 seems better in testing than 4.\n\t\t# In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.\n\n\t\t# prep defaults and init torch.optim base\n\t\tdefaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold,\n\t\t\t\t\t\teps=eps, weight_decay=weight_decay)\n\t\tsuper().__init__(params, defaults)\n\n\t\t# adjustable threshold\n\t\tself.N_sma_threshhold = N_sma_threshhold\n\n\t\t# look ahead params\n\n\t\tself.alpha = alpha\n\t\tself.k = k\n\n\t\t# radam buffer for state\n\t\tself.radam_buffer = [[None, None, None] for ind in range(10)]\n\n\t\t# gc on or off\n\t\tself.use_gc = use_gc\n\n\t\t# level of gradient centralization\n\t\tself.gc_gradient_threshold = 3 if gc_conv_only else 1\n\n\tdef __setstate__(self, state):\n\t\tsuper(Ranger, self).__setstate__(state)\n\n\tdef step(self, closure=None):\n\t\tloss = None\n\n\t\t# Evaluate averages and grad, update param tensors\n\t\tfor group in self.param_groups:\n\n\t\t\tfor p in group['params']:\n\t\t\t\tif p.grad is None:\n\t\t\t\t\tcontinue\n\t\t\t\tgrad = p.grad.data.float()\n\n\t\t\t\tif grad.is_sparse:\n\t\t\t\t\traise RuntimeError('Ranger optimizer does not support sparse gradients')\n\n\t\t\t\tp_data_fp32 = p.data.float()\n\n\t\t\t\tstate = self.state[p] # get state dict for this param\n\n\t\t\t\tif len(state) == 0: # if first time to run...init dictionary with our desired entries\n\t\t\t\t\t# if self.first_run_check==0:\n\t\t\t\t\t# self.first_run_check=1\n\t\t\t\t\t# print(\"Initializing slow buffer...should not see this at load from saved model!\")\n\t\t\t\t\tstate['step'] = 0\n\t\t\t\t\tstate['exp_avg'] = torch.zeros_like(p_data_fp32)\n\t\t\t\t\tstate['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n\n\t\t\t\t\t# look ahead weight storage now in state dict\n\t\t\t\t\tstate['slow_buffer'] = torch.empty_like(p.data)\n\t\t\t\t\tstate['slow_buffer'].copy_(p.data)\n\n\t\t\t\telse:\n\t\t\t\t\tstate['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n\t\t\t\t\tstate['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n\t\t\t\t# begin computations\n\t\t\t\texp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n\t\t\t\tbeta1, beta2 = group['betas']\n\n\t\t\t\t# GC operation for Conv layers and FC 
layers\n\t\t\t\tif grad.dim() > self.gc_gradient_threshold:\n\t\t\t\t\tgrad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True))\n\n\t\t\t\tstate['step'] += 1\n\n\t\t\t\t# compute variance mov avg\n\t\t\t\texp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\t\t\t\t# compute mean moving avg\n\t\t\t\texp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n\t\t\t\tbuffered = self.radam_buffer[int(state['step'] % 10)]\n\n\t\t\t\tif state['step'] == buffered[0]:\n\t\t\t\t\tN_sma, step_size = buffered[1], buffered[2]\n\t\t\t\telse:\n\t\t\t\t\tbuffered[0] = state['step']\n\t\t\t\t\tbeta2_t = beta2 ** state['step']\n\t\t\t\t\tN_sma_max = 2 / (1 - beta2) - 1\n\t\t\t\t\tN_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n\t\t\t\t\tbuffered[1] = N_sma\n\t\t\t\t\tif N_sma > self.N_sma_threshhold:\n\t\t\t\t\t\tstep_size = math.sqrt(\n\t\t\t\t\t\t\t(1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n\t\t\t\t\t\t\t\t\t\tN_sma_max - 2)) / (1 - beta1 ** state['step'])\n\t\t\t\t\telse:\n\t\t\t\t\t\tstep_size = 1.0 / (1 - beta1 ** state['step'])\n\t\t\t\t\tbuffered[2] = step_size\n\n\t\t\t\tif group['weight_decay'] != 0:\n\t\t\t\t\tp_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n\t\t\t\t# apply lr\n\t\t\t\tif N_sma > self.N_sma_threshhold:\n\t\t\t\t\tdenom = exp_avg_sq.sqrt().add_(group['eps'])\n\t\t\t\t\tp_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)\n\t\t\t\telse:\n\t\t\t\t\tp_data_fp32.add_(-step_size * group['lr'], exp_avg)\n\n\t\t\t\tp.data.copy_(p_data_fp32)\n\n\t\t\t\t# integrated look ahead...\n\t\t\t\t# we do it at the param level instead of group level\n\t\t\t\tif state['step'] % group['k'] == 0:\n\t\t\t\t\tslow_p = state['slow_buffer'] # get access to slow param tensor\n\t\t\t\t\tslow_p.add_(self.alpha, p.data - slow_p) # (fast weights - slow weights) * alpha\n\t\t\t\t\tp.data.copy_(slow_p) # copy interpolated weights to RAdam param tensor\n\n\t\treturn loss" }, { "identifier": "Net", "path": "models/networks.py", "snippet": "class LocalMLP(nn.Module):\nclass Net3(nn.Module):\n def __init__(self, dim_component=512, dim_style=512, num_w_layers=18,latent_squeeze_ratio=1):\n def forward(self, x):\n def __init__(self,opts,):\n def forward(self, img,mask, resize=False, randomize_noise=True,return_latents=False):\n def get_style(self, img, mask):\n def get_style_vectors(self, img, mask):\n def cal_style_codes(self,style_vectors):\n def gen_img(self, struc_codes, style_codes, mask, randomize_noise=True, noise=None, return_latents=False):" }, { "identifier": "Generator", "path": "models/stylegan2/model.py", "snippet": "class Generator(nn.Module):\n def __init__(\n self,\n size,\n style_dim,\n n_mlp,\n channel_multiplier=2,\n blur_kernel=[1, 3, 3, 1],\n lr_mlp=0.01,\n split_layer_idx = 7, \n remaining_layer_idx = 18, \n ):\n super().__init__()\n self.split_layer_idx = split_layer_idx\n self.remaining_layer_idx = remaining_layer_idx\n self.size = size\n\n self.style_dim = style_dim\n\n layers = [PixelNorm()]\n\n for i in range(n_mlp):\n layers.append(\n EqualLinear(\n style_dim, style_dim, lr_mul=lr_mlp, activation=\"fused_lrelu\"\n )\n )\n\n self.style = nn.Sequential(*layers)\n\n self.channels = {\n 4: 512,\n 8: 512,\n 16: 512,\n 32: 512,\n 64: 256 * channel_multiplier,\n 128: 128 * channel_multiplier,\n 256: 64 * channel_multiplier,\n 512: 32 * channel_multiplier,\n 1024: 16 * channel_multiplier,\n }\n\n self.input = ConstantInput(self.channels[4])\n self.conv1 = StyledConv(\n self.channels[4], self.channels[4], 3, 
style_dim, blur_kernel=blur_kernel,\n mask_op = True\n )\n self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False,\n mask_op = True\n )\n\n self.log_size = int(math.log(size, 2))\n self.num_layers = (self.log_size - 2) * 2 + 1\n\n self.convs = nn.ModuleList()\n self.upsamples = nn.ModuleList()\n self.to_rgbs = nn.ModuleList()\n self.noises = nn.Module()\n\n in_channel = self.channels[4]\n\n for layer_idx in range(self.num_layers):\n res = (layer_idx + 5) // 2\n shape = [1, 1, 2 ** res, 2 ** res]\n self.noises.register_buffer(\n f\"noise_{layer_idx}\", torch.randn(*shape))\n\n for i in range(3, self.log_size + 1):\n out_channel = self.channels[2 ** i]\n\n self.convs.append(\n StyledConv(\n in_channel,\n out_channel,\n 3,\n style_dim,\n upsample=True,\n blur_kernel=blur_kernel,\n mask_op= False if i > (2+self.remaining_layer_idx//2) else True,\n # mask_op = True\n )\n )\n\n self.convs.append(\n StyledConv(\n out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel,\n mask_op= False if i > (2+self.remaining_layer_idx//2) else True,\n # mask_op = True\n )\n )\n\n self.to_rgbs.append(\n ToRGB(\n out_channel, style_dim, \n mask_op= False if self.remaining_layer_idx != 17 and i >= (2+self.remaining_layer_idx//2) else True, # 这里一定是大于等于\n # mask_op = True\n )\n )\n\n in_channel = out_channel\n\n self.n_latent = self.log_size * 2 - 2\n\n def make_noise(self):\n device = self.input.input.device\n\n noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]\n\n for i in range(3, self.log_size + 1):\n for _ in range(2):\n noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))\n\n return noises\n\n def mean_latent(self, n_latent):\n latent_in = torch.randn(\n n_latent, self.style_dim, device=self.input.input.device\n )\n latent = self.style(latent_in).mean(0, keepdim=True)\n\n return latent\n\n def get_latent(self, input):\n return self.style(input)\n\n def forward(\n self,\n styles,\n structure_feats, # 第7层输出的残差\n mask,\n return_latents=False,\n inject_index=None,\n truncation=1,\n truncation_latent=None,\n input_is_latent=False, # 输入是否是W空间的latetnt code\n noise=None,\n randomize_noise=True,\n use_structure_code=False,\n ):\n if not input_is_latent:\n styles = [self.style(s) for s in styles] # 两个随机的z得到的对应的两组styles\n\n if noise is None:\n if randomize_noise:\n noise = [None] * self.num_layers\n else:\n noise = [\n getattr(self.noises, f\"noise_{i}\") for i in range(self.num_layers)\n ]\n\n if truncation < 1:\n style_t = []\n\n for style in styles:\n style_t.append(\n truncation_latent + truncation * (style - truncation_latent)\n )\n\n styles = style_t\n\n if len(styles) < 2:\n inject_index = self.n_latent\n\n if styles[0].ndim < 4:\n latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)\n\n else:\n latent = styles[0]\n\n else:\n if inject_index is None: # 选择出两组style交叉交换的位置, TODO 还没改成多个compnent的\n inject_index = random.randint(1, self.n_latent - 1)\n\n latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)\n latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)\n\n latent = torch.cat([latent, latent2], 1) # 交叉后style\n\n # constant层,其实这里没必要传入latent code,只是为了获取batch size\n out = self.input(latent)\n out = self.conv1(out, latent[:, :, 0], mask, noise=noise[0])\n skip = self.to_rgb1(out, latent[:, :, 1], mask) # 重复使用 latent?\n\n i = 1\n for conv1, conv2, noise1, noise2, to_rgb in zip(\n self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs\n ):\n \n if i<self.remaining_layer_idx: \n out = conv1(out, latent[:, :, i], mask, 
noise=noise1)\n \n if i+2 == self.split_layer_idx:\n if use_structure_code:\n # skip = torch.zeros_like(skip)\n out = structure_feats # 第5层的特征\n \n intermediate_feats = out\n \n out = conv2(out, latent[:, :, i + 1], mask, noise=noise2)\n if self.remaining_layer_idx == 17 or i+2 != self.remaining_layer_idx: \n skip = to_rgb(out, latent[:, :, i + 2], mask, skip)\n else:\n skip = to_rgb(out, latent[:, 0, i + 2], mask, skip) \n else:\n out = conv1(out, latent[:, 0, i], mask, noise=noise1)\n out = conv2(out, latent[:, 0, i + 1], mask, noise=noise2)\n skip = to_rgb(out, latent[:, 0, i + 2], mask, skip)\n\n i += 2\n\n image = skip\n\n if return_latents:\n return image, latent,intermediate_feats\n\n else:\n return image, None,intermediate_feats" }, { "identifier": "Discriminator", "path": "models/stylegan2/model.py", "snippet": "class Discriminator(nn.Module):\n def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):\n super().__init__()\n\n channels = {\n 4: 512,\n 8: 512,\n 16: 512,\n 32: 512,\n 64: 256 * channel_multiplier,\n 128: 128 * channel_multiplier,\n 256: 64 * channel_multiplier,\n 512: 32 * channel_multiplier,\n 1024: 16 * channel_multiplier,\n }\n\n convs = [ConvLayer(3, channels[size], 1)]\n\n log_size = int(math.log(size, 2))\n\n in_channel = channels[size]\n\n for i in range(log_size, 2, -1):\n out_channel = channels[2 ** (i - 1)]\n\n convs.append(ResBlock(in_channel, out_channel, blur_kernel))\n\n in_channel = out_channel\n\n self.convs = nn.Sequential(*convs)\n\n self.stddev_group = 4\n self.stddev_feat = 1\n\n self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)\n self.final_linear = nn.Sequential(\n EqualLinear(channels[4] * 4 * 4, channels[4],\n activation=\"fused_lrelu\"),\n EqualLinear(channels[4], 1),\n )\n\n def forward(self, input):\n out = self.convs(input)\n\n batch, channel, height, width = out.shape\n group = min(batch, self.stddev_group)\n stddev = out.view(\n group, -1, self.stddev_feat, channel // self.stddev_feat, height, width\n )\n stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)\n stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)\n stddev = stddev.repeat(group, 1, height, width)\n out = torch.cat([out, stddev], 1)\n\n out = self.final_conv(out)\n\n out = out.view(batch, -1)\n out = self.final_linear(out)\n\n return out" } ]
from utils import torch_utils from datasets.dataset import CelebAHQDataset, get_transforms, TO_TENSOR, NORMALIZE, MASK_CONVERT_TF, FFHQDataset, FFHQ_MASK_CONVERT_TF, MASK_CONVERT_TF_DETAILED, FFHQ_MASK_CONVERT_TF_DETAILED from criteria.w_norm import WNormLoss from criteria.id_loss import IDLoss from criteria.face_parsing.face_parsing_loss import FaceParsingLoss from criteria.lpips.lpips import LPIPS from criteria.adv_loss import AdvDLoss,AdvGLoss,DR1Loss,GPathRegularizer from criteria.style_loss import StyleLoss from training.ranger import Ranger from models.networks import Net, Net2, Net3, NetStage2,MultiScaleNet from tensorboardX import SummaryWriter from torch.utils.data import DataLoader from torch import nn from models.stylegan2.model import Generator,Discriminator from collections import OrderedDict from models.encoder_with_optim import EncoderPlusOptimNet import torchvision.transforms as transforms import torch.nn.functional as F import torch import os import matplotlib import matplotlib.pyplot as plt import torch.distributed as dist import math
13,075
# ==== Initialize network ==== self.net = Net3(self.opts) # print(self.device) self.net = nn.SyncBatchNorm.convert_sync_batchnorm(self.net) self.net = self.net.to(self.device) self.net_ema = Net3(self.opts).to(self.device).eval() torch_utils.accumulate(self.net_ema,self.net, 0) if self.opts.train_D: self.D = Discriminator(self.opts.out_size).to(self.device).eval() if self.opts.dist_train: # Wrap the model self.net = nn.parallel.DistributedDataParallel(self.net, device_ids=[self.local_rank], output_device=self.local_rank, broadcast_buffers=False, find_unused_parameters=True ) if self.opts.train_D: self.D = nn.parallel.DistributedDataParallel(self.D, device_ids=[self.local_rank], output_device=self.local_rank, broadcast_buffers=False, find_unused_parameters=True ) # 加载整个模型预训练好的参数,继续训练 if self.opts.checkpoint_path is not None: ckpt_dict=torch.load(self.opts.checkpoint_path) self.global_step= ckpt_dict["opts"]["max_steps"]+1 if self.opts.dist_train: self.net.module.latent_avg = ckpt_dict['latent_avg'].to(self.device) self.net.load_state_dict(ckpt_dict["state_dict"]) if self.opts.train_D: self.D.module.load_state_dict(ckpt_dict["D_state_dict"]) else: self.net.latent_avg = ckpt_dict['latent_avg'].to(self.device) self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"],prefix="module.")) if self.opts.train_D: self.D.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["D_state_dict"],prefix="module.")) print("Resume training at step %d..."%self.global_step) # 加载 stage-1 训练好的参数 elif self.opts.stage1_checkpoint_path is not None: stage1_ckpt = torch.load(self.opts.stage1_checkpoint_path) if self.opts.dist_train: self.net.module.stage1_net.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["state_dict"],prefix="module.")) if self.opts.train_D: self.D.module.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["D_state_dict"],prefix="module.")) # avg latent code self.net.module.latent_avg = stage1_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.module.latent_avg = self.net.module.latent_avg else: self.net.module.latent_avg = self.net.module.latent_avg else: self.net.stage1_net.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["state_dict"],prefix="module.")) if self.opts.train_D: self.D.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["D_state_dict"],prefix="module.")) # avg latent code self.net.latent_avg = stage1_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.latent_avg = self.net.latent_avg else: self.net.latent_avg = self.net.latent_avg print('Loading stage-1 pretrained weights!') # 加载styleGAN预训练权重 else: styleGAN2_ckpt = torch.load(self.opts.stylegan_weights) if self.opts.dist_train: self.net.module.G.load_state_dict(styleGAN2_ckpt['g_ema'], strict=False) if self.opts.train_D: if self.opts.out_size == 1024: self.D.module.load_state_dict(styleGAN2_ckpt['d'], strict=False) # 1024分辨率 可以直接加载 else: self.custom_load_D_state_dict(self.D.module, styleGAN2_ckpt['d']) # 只加载判别器的部分层 # avg latent code self.net.module.latent_avg = styleGAN2_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.module.latent_avg = self.net.module.latent_avg.repeat(1, 1) else: self.net.module.latent_avg = self.net.module.latent_avg.repeat(2 * int(math.log(self.opts.out_size, 2)) -2 , 1) else: self.net.G.load_state_dict(styleGAN2_ckpt['g_ema'], strict=False) if self.opts.train_D: if self.opts.out_size == 1024: self.D.load_state_dict(styleGAN2_ckpt['d'], strict=False) # 1024分辨率 可以直接加载 else: 
self.custom_load_D_state_dict(self.D, styleGAN2_ckpt['d']) # 只加载判别器的部分层 # avg latent code self.net.latent_avg = styleGAN2_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.latent_avg = self.net.latent_avg.repeat(1, 1) else: self.net.latent_avg = self.net.latent_avg.repeat(2 * int(math.log(self.opts.out_size, 2)) -2 , 1) print('Loading pretrained styleGAN2 weights!') # Estimate latent_avg via dense sampling if latent_avg is not available if self.opts.dist_train: if self.net.module.latent_avg is None: self.net.module.latent_avg = self.net.module.G.mean_latent(int(1e5))[0].detach() else: if self.net.latent_avg is None: self.net.latent_avg = self.net.G.mean_latent(int(1e5))[0].detach() self.mse_loss = nn.MSELoss().to(self.device).eval() if self.opts.lpips_lambda > 0: self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval() if self.opts.id_lambda > 0:
matplotlib.use('Agg') # torch.autograd.set_detect_anomaly(True) ACCUM = 0.5 ** (32 / (100 * 1000)) # 0.9977843871238888 class Coach: def __init__(self, opts): self.opts = opts self.global_step = 0 # 分布式训练 if self.opts.dist_train: self.num_gpus = torch.cuda.device_count() self.rank = int(os.environ["RANK"]) self.world_size = int(os.environ["WORLD_SIZE"]) self.local_rank = int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(self.rank % self.num_gpus) dist.init_process_group( backend='nccl', world_size=self.world_size, rank=self.rank, ) self.device = torch.device("cuda", self.local_rank) else: self.rank=0 # dummy rank self.device = torch.device("cuda", 0) self.opts.device=self.device # ==== Initialize network ==== self.net = Net3(self.opts) # print(self.device) self.net = nn.SyncBatchNorm.convert_sync_batchnorm(self.net) self.net = self.net.to(self.device) self.net_ema = Net3(self.opts).to(self.device).eval() torch_utils.accumulate(self.net_ema,self.net, 0) if self.opts.train_D: self.D = Discriminator(self.opts.out_size).to(self.device).eval() if self.opts.dist_train: # Wrap the model self.net = nn.parallel.DistributedDataParallel(self.net, device_ids=[self.local_rank], output_device=self.local_rank, broadcast_buffers=False, find_unused_parameters=True ) if self.opts.train_D: self.D = nn.parallel.DistributedDataParallel(self.D, device_ids=[self.local_rank], output_device=self.local_rank, broadcast_buffers=False, find_unused_parameters=True ) # 加载整个模型预训练好的参数,继续训练 if self.opts.checkpoint_path is not None: ckpt_dict=torch.load(self.opts.checkpoint_path) self.global_step= ckpt_dict["opts"]["max_steps"]+1 if self.opts.dist_train: self.net.module.latent_avg = ckpt_dict['latent_avg'].to(self.device) self.net.load_state_dict(ckpt_dict["state_dict"]) if self.opts.train_D: self.D.module.load_state_dict(ckpt_dict["D_state_dict"]) else: self.net.latent_avg = ckpt_dict['latent_avg'].to(self.device) self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"],prefix="module.")) if self.opts.train_D: self.D.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["D_state_dict"],prefix="module.")) print("Resume training at step %d..."%self.global_step) # 加载 stage-1 训练好的参数 elif self.opts.stage1_checkpoint_path is not None: stage1_ckpt = torch.load(self.opts.stage1_checkpoint_path) if self.opts.dist_train: self.net.module.stage1_net.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["state_dict"],prefix="module.")) if self.opts.train_D: self.D.module.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["D_state_dict"],prefix="module.")) # avg latent code self.net.module.latent_avg = stage1_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.module.latent_avg = self.net.module.latent_avg else: self.net.module.latent_avg = self.net.module.latent_avg else: self.net.stage1_net.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["state_dict"],prefix="module.")) if self.opts.train_D: self.D.load_state_dict(torch_utils.remove_module_prefix(stage1_ckpt["D_state_dict"],prefix="module.")) # avg latent code self.net.latent_avg = stage1_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.latent_avg = self.net.latent_avg else: self.net.latent_avg = self.net.latent_avg print('Loading stage-1 pretrained weights!') # 加载styleGAN预训练权重 else: styleGAN2_ckpt = torch.load(self.opts.stylegan_weights) if self.opts.dist_train: self.net.module.G.load_state_dict(styleGAN2_ckpt['g_ema'], strict=False) if self.opts.train_D: if self.opts.out_size == 1024: 
self.D.module.load_state_dict(styleGAN2_ckpt['d'], strict=False) # 1024分辨率 可以直接加载 else: self.custom_load_D_state_dict(self.D.module, styleGAN2_ckpt['d']) # 只加载判别器的部分层 # avg latent code self.net.module.latent_avg = styleGAN2_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.module.latent_avg = self.net.module.latent_avg.repeat(1, 1) else: self.net.module.latent_avg = self.net.module.latent_avg.repeat(2 * int(math.log(self.opts.out_size, 2)) -2 , 1) else: self.net.G.load_state_dict(styleGAN2_ckpt['g_ema'], strict=False) if self.opts.train_D: if self.opts.out_size == 1024: self.D.load_state_dict(styleGAN2_ckpt['d'], strict=False) # 1024分辨率 可以直接加载 else: self.custom_load_D_state_dict(self.D, styleGAN2_ckpt['d']) # 只加载判别器的部分层 # avg latent code self.net.latent_avg = styleGAN2_ckpt['latent_avg'].to(self.device) if self.opts.learn_in_w: self.net.latent_avg = self.net.latent_avg.repeat(1, 1) else: self.net.latent_avg = self.net.latent_avg.repeat(2 * int(math.log(self.opts.out_size, 2)) -2 , 1) print('Loading pretrained styleGAN2 weights!') # Estimate latent_avg via dense sampling if latent_avg is not available if self.opts.dist_train: if self.net.module.latent_avg is None: self.net.module.latent_avg = self.net.module.G.mean_latent(int(1e5))[0].detach() else: if self.net.latent_avg is None: self.net.latent_avg = self.net.G.mean_latent(int(1e5))[0].detach() self.mse_loss = nn.MSELoss().to(self.device).eval() if self.opts.lpips_lambda > 0: self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval() if self.opts.id_lambda > 0:
self.id_loss = IDLoss(self.opts).to(self.device).eval()
11
2023-10-15 12:15:01+00:00
16k
sotopia-lab/sotopia
examples/experiment_eval.py
[ { "identifier": "LLMAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class LLMAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n script_like: bool = False,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n self.model_name = model_name\n self.script_like = script_like\n\n @property\n def goal(self) -> str:\n if self._goal is not None:\n return self._goal\n assert (\n len(self.inbox) > 0\n ), \"attribute goal has to be called after at least one step\"\n goal = generate_goal(\n self.model_name,\n background=self.inbox[0][\n 1\n ].to_natural_language(), # Only consider the first message for now\n )\n return goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(\n self,\n obs: Observation,\n gen_func: Callable[..., AgentAction] = generate_action,\n ) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action = gen_func(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n )\n return action\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action, prompt = await agenerate_action(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n script_like=self.script_like,\n )\n return action" }, { "identifier": "EnvAgentComboStorage", "path": "sotopia/database/env_agent_combo_storage.py", "snippet": "class EnvAgentComboStorage(JsonModel):\n env_id: str = Field(default_factory=lambda: \"\", index=True)\n agent_ids: list[str] = Field(default_factory=lambda: [], index=True)" }, { "identifier": "EpisodeLog", "path": "sotopia/database/logs.py", "snippet": "class EpisodeLog(JsonModel):\n # Note that we did not validate the following constraints:\n # 1. The number of turns in messages and rewards should be the same or off by 1\n # 2. 
The agents in the messages are the same as the agetns\n\n environment: str = Field(index=True)\n agents: list[str] = Field(index=True)\n tag: str | None = Field(index=True)\n models: list[str] | None = Field(index=True)\n messages: list[list[tuple[str, str, str]]] # Messages arranged by turn\n reasoning: str\n rewards: list[\n tuple[float, dict[str, float]] | float\n ] # Rewards arranged by turn\n rewards_prompt: str\n\n @root_validator\n def agent_number_message_number_reward_number_turn_number_match(\n cls, values: Any\n ) -> Any:\n agents, _, reasoning, rewards = (\n values.get(\"agents\"),\n values.get(\"messages\"),\n values.get(\"reasoning\"),\n values.get(\"rewards\"),\n )\n agent_number = len(agents)\n\n assert (\n len(rewards) == agent_number\n ), f\"Number of agents in rewards {len(rewards)} and agents {agent_number} do not match\"\n return values\n\n def render_for_humans(self) -> tuple[list[AgentProfile], list[str]]:\n \"\"\"Generate a human readable version of the episode log.\n\n Returns:\n A tuple of (a list of agent_profiles, a list of str): The agent profiles, and the messages and rewards in each turn.\n \"\"\"\n\n agent_profiles = [\n AgentProfile.get(pk=uuid_str) for uuid_str in self.agents\n ]\n messages_and_rewards = []\n for idx, turn in enumerate(self.messages):\n messages_in_this_turn = []\n if idx == 0:\n assert (\n len(turn) >= 2\n ), \"The first turn should have at least environemnt messages\"\n messages_in_this_turn.append(turn[0][2])\n messages_in_this_turn.append(turn[1][2])\n for sender, receiver, message in turn:\n if receiver == \"Environment\":\n if sender != \"Environment\":\n if \"did nothing\" in message:\n continue\n else:\n if \"said:\" in message:\n messages_in_this_turn.append(\n f\"{sender} {message}\"\n )\n else:\n messages_in_this_turn.append(\n f\"{sender}: {message}\"\n )\n else:\n messages_in_this_turn.append(message)\n messages_and_rewards.append(\"\\n\".join(messages_in_this_turn))\n messages_and_rewards.append(f\"The reasoning is:\\n{self.reasoning}\")\n messages_and_rewards.append(\n f\"The rewards are:\\nAgent 1: {self.rewards[0]}\\nAgent 2: {self.rewards[1]}\"\n )\n return agent_profiles, messages_and_rewards" }, { "identifier": "AgentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class AgentProfile(JsonModel):\n first_name: str = Field(index=True)\n last_name: str = Field(index=True)\n age: int = Field(index=True, default_factory=lambda: 0)\n occupation: str = Field(index=True, default_factory=lambda: \"\")\n gender: str = Field(index=True, default_factory=lambda: \"\")\n gender_pronoun: str = Field(index=True, default_factory=lambda: \"\")\n public_info: str = Field(index=True, default_factory=lambda: \"\")\n big_five: str = Field(index=True, default_factory=lambda: \"\")\n moral_values: list[str] = Field(index=False, default_factory=lambda: [])\n schwartz_personal_values: list[str] = Field(\n index=False, default_factory=lambda: []\n )\n personality_and_values: str = Field(index=True, default_factory=lambda: \"\")\n decision_making_style: str = Field(index=True, default_factory=lambda: \"\")\n secret: str = Field(default_factory=lambda: \"\")\n model_id: str = Field(default_factory=lambda: \"\")" }, { "identifier": "EnvironmentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class EnvironmentProfile(JsonModel):\n codename: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"The codename of the environment\",\n )\n source: str = Field(\n index=True,\n 
default_factory=lambda: \"\",\n description=\"The source of the environment\",\n )\n scenario: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"A concrete scenario of where the social interaction takes place, the scenario should have two agents (agent1 and agent2), and you should illustrate the relationship between the two agents, and for what purpose agent1 is interacting with agent2. Please avoid mentioning specific names and occupations in the scenario and keep all the mentions gender-neutral. Also avoid generating scenarios that requires childrend (below 18) or elderly (above 70) to be involved.\",\n )\n agent_goals: list[str] = Field(\n default_factory=lambda: [],\n description=\"The social goals of each agent, which could include <extra_info>...</extra_info>, <clarification_hint>...</clarification_hint>, and <strategy_hint>...</strategy_hint> to help the agent achieve the goal. Avoid providing too specific strategy hint, try to be as abstract as possible. For example, use 'you can provide financial benefits to achieve your goal' instead of 'you can buy him a boba tea to achieve your goal.'\",\n )\n relationship: RelationshipType = Field(\n index=True,\n default_factory=lambda: RelationshipType.stranger,\n description=\"The relationship between the two agents, choose from: stranger, know_by_name, acquaintance, friend, romantic_relationship, family_member. Do not make up a relationship, but choose from the list, 0 means stranger, 1 means know_by_name, 2 means acquaintance, 3 means friend, 4 means romantic_relationship, 5 means family_member\",\n )\n age_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The age constraint of the environment, a list of tuples, each tuple is a range of age, e.g., '[(18, 25), (30, 40)]' means the environment is only available to agent one between 18 and 25, and agent two between 30 and 40\",\n )\n occupation_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The occupation constraint of the environment, a list of lists, each list is a list of occupations, e.g., '[['student', 'teacher'], ['doctor', 'nurse']]' means the environment is only available to agent one if agent one is a student or a teacher, and agent two is a doctor or a nurse\",\n )\n agent_constraint: list[list[str]] | None = Field(\n default_factory=lambda: None,\n )" }, { "identifier": "ReachGoalLLMEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class ReachGoalLLMEvaluator(Evaluator):\n @beartype\n def __init__(\n self, model_name: LLM_Name, response_format: str = \"basic\"\n ) -> None:\n self.model_name = model_name\n self.prompt = \"\"\n self.response_format = response_format\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n raise NotImplementedError(\n \"ReachGoalLLMEvaluator is not implemented for synchronous evaluation\"\n )\n\n @gin.configurable\n @beartype\n async def __acall__(\n self,\n turn_number: int,\n messages: list[tuple[str, Message]] | None,\n history: str = \"\",\n temperature: float = 0.7,\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # filter did nothing\n if not history and messages:\n messages_filtered = [\n (x, y)\n for x, y in messages\n if \"did nothing\" not in y.to_natural_language()\n ]\n history = \"\\n\".join(\n [\n f\"{x} {y.to_natural_language()}\"\n if x != \"Environment\"\n else y.to_natural_language()\n for x, y in messages_filtered\n ]\n 
)\n response_format_class = (\n EnvResponsePlus if self.response_format == \"plus\" else EnvResponse\n )\n\n try:\n response: EnvResponsePlus | EnvResponse # fix type error from langchain 0.0.264. we don't need this line for langchain 0.0.263\n response, prompt = await agenerate(\n model_name=self.model_name,\n template=\"\"\"{history},\n Based on previous interactions, evaluate how well participants achieve their goals.\n Please following the format:\n {format_instructions}\n \"\"\",\n input_values=dict(history=history),\n output_parser=PydanticOutputParser[\n EnvResponsePlus | EnvResponse\n ](pydantic_object=response_format_class),\n temperature=temperature,\n )\n self.prompt = prompt\n response_list = []\n # TODO: multiple agents\n for dimension in response.agent_1_evaluation.dict().keys():\n response_list.append(\n (\n \"agent_1\",\n (\n (\n dimension,\n response.agent_1_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_1_evaluation.dict()[dimension][0],\n ),\n )\n )\n response_list.append(\n (\n \"agent_2\",\n (\n (\n dimension,\n response.agent_2_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_2_evaluation.dict()[dimension][0],\n ),\n )\n )\n return response_list\n except Exception as e:\n log.debug(f\"[red] Failed to generate environment response. {e}\")\n return []" }, { "identifier": "RuleBasedTerminatedEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class RuleBasedTerminatedEvaluator(Evaluator):\n def __init__(\n self, max_turn_number: int = 20, max_stale_turn: int = 2\n ) -> None:\n self.max_turn_number = max_turn_number\n self.max_stale_turn = max_stale_turn\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # Rule 1: If the conversation is too long, terminate the conversation\n conversation_too_long = turn_number > self.max_turn_number\n # Rule 2: If one of the players leaves, terminate the conversation\n p1_leaving = (\n len(messages) > 1\n and isinstance(messages[-2][1], AgentAction)\n and messages[-2][1].action_type == \"leave\"\n )\n p2_leaving = (\n bool(len(messages))\n and isinstance(messages[-1][1], AgentAction)\n and messages[-1][1].action_type == \"leave\"\n )\n # Rule 3: If the conversation is stale for too long, terminate the conversation\n stale_count = 0\n for message in messages[::-1]:\n if message[0] == \"Environment\":\n continue\n assert isinstance(message[1], AgentAction)\n if message[1].action_type == \"none\":\n stale_count += 1\n else:\n break\n if stale_count > self.max_stale_turn:\n break\n stale_too_long = stale_count > self.max_stale_turn\n terminated = (\n conversation_too_long or p1_leaving or p2_leaving or stale_too_long\n )\n reasons_for_termination = (\n f\"{'The conversation is too long; ' if conversation_too_long else ''}\"\n f\"{'Agent 1 is leaving; ' if p1_leaving else ''}\"\n f\"{'Agent 2 is leaving; ' if p2_leaving else ''}\"\n f\"{'The conversation stales for too long; ' if stale_too_long else ''}\"\n )\n return [\n (\n \"environment\",\n ((\"terminated\", terminated), reasons_for_termination),\n )\n ]\n\n async def __acall__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n return self(turn_number, messages)" }, { "identifier": "ParallelSotopiaEnv", "path": "sotopia/envs/parallel.py", "snippet": "class ParallelSotopiaEnv(\n ParallelEnv[str, Observation, AgentAction], MessengerMixin\n):\n def __init__(\n self,\n 
available_action_types: set[ActionType] = set(\n [\"none\", \"speak\", \"non-verbal communication\", \"action\", \"leave\"]\n ),\n action_order: Literal[\n \"simutaneous\", \"round-robin\", \"random\"\n ] = \"simutaneous\",\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n evaluators: list[Evaluator] = [],\n terminal_evaluators: list[Evaluator] = [],\n uuid_str: str | None = None,\n env_profile: EnvironmentProfile | None = None,\n ) -> None:\n \"\"\"A sotopia environment for parallel agents.\n\n Args:\n available_action_types (set[ActionType], optional): The action types that are available to the agents. Defaults to set([\"none\", \"speak\", \"non-verbal communication\", \"action\"]).\n action_order (Literal[\"simutaneous\", \"round-robin\", \"random\"], optional): The order in which the agents take actions. Defaults to \"simutaneous\".\n model_name (LLM_Name, optional): The name of the language model to use. Defaults to \"gpt-3.5-turbo\".\n \"\"\"\n super().__init__()\n self.model_name = model_name\n self.background = ScriptBackground(\n scenario=\"\",\n p1_background=\"\",\n p2_background=\"\",\n p1_goal=\"\",\n p2_goal=\"\",\n p1_name=\"\",\n p2_name=\"\",\n )\n\n self.agents = []\n self.action_spaces = {}\n self.available_action_types = list(available_action_types)\n self.action_order = action_order\n self.action_mask: list[bool] = []\n self.evaluators = evaluators\n self.terminal_evaluators = terminal_evaluators\n\n # if an environment profile is provided, use it\n assert (\n env_profile or uuid_str\n ), \"Either env_profile or uuid_str must be provided\"\n if env_profile is not None:\n self.profile = env_profile\n # if a uuid is provided, try to load the environment profile from the database\n elif uuid_str is not None:\n # try retrieving profile from database\n try:\n self.profile = EnvironmentProfile.get(pk=uuid_str)\n except NotFoundError:\n raise ValueError(\n f\"Agent with uuid {uuid_str} not found in database\"\n )\n\n @configurable\n def reset(\n self,\n seed: int | None = None,\n options: dict[str, str] | None = None,\n agents: Agents | None = None,\n omniscient: bool = False,\n lite: bool = False,\n ) -> dict[str, Observation]:\n \"\"\"Starting a new episode. Must be called before step().\n\n Args:\n seed (int, optional): Seed for the environment. Defaults to None. Not used right now.\n options (dict, optional): Options for the environment. Defaults to None.\n \"partial_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound can be incompleted (\"unknown\" for missing parts), and the missing parts will be filled in by the environment.\n \"full_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound must be completed (no \"unknown\" for missing parts).\n omniscient (bool, optional): Whether the agents know the other agent's goal. 
Defaults to False.\n \"\"\"\n super().__init__()\n MessengerMixin.reset_inbox(self)\n assert (\n not options\n or not (\"partial_background_file\" in options)\n and not (\"full_background_file\" in options)\n ), \"partial_background_file and full_background_file are not supported anymore\"\n if agents is not None:\n assert agents, \"agents must be provided\"\n assert len(agents) == 2, \"Only supporting two agents right now\"\n agent_names = list(agents.keys())\n agent_goals = self.profile.agent_goals\n assert (\n len(agent_goals) == 2\n ), \"Only supporting two agents right now\"\n\n raw_background = ScriptBackground(\n scenario=self.profile.scenario,\n p1_background=get_bio(\n self.profile.relationship,\n agents[agent_names[0]].profile,\n agent_id=0,\n ),\n p2_background=get_bio(\n self.profile.relationship,\n agents[agent_names[1]].profile,\n agent_id=1,\n ),\n p1_goal=f\"<root viewer='agent_0'>{agent_goals[0]}</root>\",\n p2_goal=f\"<root viewer='agent_1'>{agent_goals[1]}</root>\",\n p1_name=agent_names[0],\n p2_name=agent_names[1],\n )\n\n if lite:\n raw_background.p1_background = \"\"\n raw_background.p2_background = \"\"\n\n self.background = ScriptBackground(\n scenario=render_text_for_environment(raw_background.scenario),\n p1_background=render_text_for_environment(\n raw_background.p1_background\n ),\n p2_background=render_text_for_environment(\n raw_background.p2_background\n ),\n p1_goal=render_text_for_environment(raw_background.p1_goal),\n p2_goal=render_text_for_environment(raw_background.p2_goal),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n else:\n raise ValueError(\"agents must be provided\")\n\n self.agents = [self.background.p1_name, self.background.p2_name]\n agent_backgrounds: list[ScriptBackground] = []\n if omniscient:\n for i in range(self.num_agents):\n agent_backgrounds.append(copy.deepcopy(self.background))\n else:\n for i in range(self.num_agents):\n agent_backgrounds.append(\n ScriptBackground(\n scenario=render_text_for_agent(\n raw_background.scenario, i\n ),\n p1_background=render_text_for_agent(\n raw_background.p1_background, i\n ),\n p2_background=render_text_for_agent(\n raw_background.p2_background, i\n ),\n p1_goal=render_text_for_agent(\n raw_background.p1_goal, i\n ),\n p2_goal=render_text_for_agent(\n raw_background.p2_goal, i\n ),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n )\n background_for_a = agent_backgrounds[0]\n background_for_b = agent_backgrounds[1]\n\n print(\"Is the agent omniscient?\", omniscient)\n if not omniscient:\n background_for_a.p2_goal = \"Unknown\"\n background_for_b.p1_goal = \"Unknown\"\n\n self.action_spaces = {\n agent: Dict(\n dict(\n action_type=Discrete(len(self.available_action_types)),\n argument=Text(256),\n )\n )\n for agent in self.agents\n }\n self.turn_number = 0\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[0] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n\n self.recv_message(\"Environment\", self.background)\n\n return {\n self.background.p1_name: Observation(\n last_turn=background_for_a.to_natural_language(),\n turn_number=0,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=background_for_b.to_natural_language(),\n turn_number=0,\n 
available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n }\n\n @beartype\n def step(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *(\n evaluator(\n turn_number=self.turn_number, messages=self.inbox\n )\n for evaluator in self.evaluators\n )\n )\n )\n )\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n },\n )\n\n @beartype\n async def astep(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n 
action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.evaluators\n ]\n )\n )\n )\n )\n\n if response.terminated:\n terminal_response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.terminal_evaluators\n ]\n )\n )\n )\n )\n # incorporate terminal response into response\n response.p1_rate = response.p1_rate or terminal_response.p1_rate\n response.p2_rate = response.p2_rate or terminal_response.p2_rate\n if response.comments and terminal_response.comments:\n response.comments += terminal_response.comments\n elif terminal_response.comments:\n response.comments = terminal_response.comments\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n info = {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n }\n if response.terminated:\n info[\"rewards_prompt\"] = {\"overall_prompt\": self.terminal_evaluators[0].prompt} # type: ignore\n\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n info,\n )\n\n def render(self, mode: str = \"human\") -> None:\n pass\n\n def close(self) -> None:\n pass" }, { "identifier": "LLM_Name", "path": "sotopia/generation_utils/generate.py", "snippet": "class EnvResponse(BaseModel):\nclass EnvResponsePydanticOutputParser(PydanticOutputParser[EnvResponse]):\nclass 
ListOfIntOutputParser(BaseOutputParser[list[int]]):\nclass ListOfStrOutputParser(BaseOutputParser[list[str]]):\nclass StrOutputParser(BaseOutputParser[str]):\nclass ScriptOutputParser(BaseOutputParser[ScriptInteractionReturnType]):\n def __init__(self, pydantic_object: Type[BaseModel] = EnvResponse) -> None:\n def parse(self, text: str) -> EnvResponse:\n def get_format_instructions(self) -> str:\n def __init__(\n self,\n number_of_int: int | None = None,\n range_of_int: tuple[int, int] | None = None,\n ):\n def _get_description_text(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> list[int]:\n def _type(self) -> str:\n def __init__(\n self,\n number_of_str: int | None = None,\n ):\n def _get_description_text(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> list[str]:\n def _type(self) -> str:\n def __init__(self) -> None:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> str:\n def _type(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> ScriptInteractionReturnType:\n def _type(self) -> str:\ndef _return_fixed_model_version(\n model_name: Literal[\"gpt-3.5-turbo\", \"gpt-4\", \"gpt-4-turbo\"]\n) -> str:\ndef obtain_chain(\n model_name: LLM_Name,\n template: str,\n input_variables: list[str],\n temperature: float = 0.7,\n max_retries: int = 6,\n) -> LLMChain:\ndef format_bad_output_for_script(\n ill_formed_output: str,\n format_instructions: str,\n agents: list[str],\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n) -> str:\ndef format_bad_output(\n ill_formed_output: str,\n format_instructions: str,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n) -> str:\ndef generate(\n model_name: LLM_Name,\n template: str,\n input_values: dict[str, str],\n output_parser: BaseOutputParser[OutputType],\n temperature: float = 0.7,\n) -> OutputType:\nasync def agenerate(\n model_name: LLM_Name,\n template: str,\n input_values: dict[str, str],\n output_parser: BaseOutputParser[OutputType],\n temperature: float = 0.7,\n) -> tuple[OutputType, str]:\ndef generate_episode(\n model_name: LLM_Name,\n participants: str = \"Jack (a greedy person), Rose\",\n topic: str = \"lawsuit\",\n extra_info: str = \"\",\n) -> EnvResponse:\nasync def agenerate_env_profile(\n model_name: LLM_Name,\n inspiration_prompt: str = \"asking my boyfriend to stop being friends with his ex\",\n examples: str = \"\",\n temperature: float = 0.7,\n) -> tuple[EnvironmentProfile, str]:\nasync def agenerate_relationship_profile(\n model_name: LLM_Name,\n agents_profiles: list[str],\n) -> tuple[RelationshipProfile, str]:\nasync def agenerate_enviroment_profile(\n model_name: LLM_Name,\n inspiration_prompt: str = \"asking my boyfriend to stop being friends with his ex\",\n examples: str = \"\",\n) -> tuple[EnvironmentProfile, str]:\ndef fill_in_background(\n model_name: LLM_Name,\n partial_background: ScriptBackground,\n) -> ScriptBackground:\ndef generate_action(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n) -> AgentAction:\ndef generate_action_speak(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n) -> AgentAction:\nasync def agenerate_action(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n temperature: float = 0.7,\n script_like: bool = False,\n) -> tuple[AgentAction, 
str]:\nasync def agenerate_script(\n model_name: LLM_Name,\n background: ScriptBackground,\n temperature: float = 0.7,\n agent_names: list[str] = [],\n agent_name: str = \"\",\n history: str = \"\",\n single_step: bool = False,\n) -> tuple[ScriptInteractionReturnType, str]:\ndef process_history(\n script: ScriptBackground | EnvResponse | dict[str, AgentAction]\n) -> str:\ndef generate_init_profile(\n model_name: LLM_Name, basic_info: dict[str, str]\n) -> str:\ndef convert_narratives(model_name: LLM_Name, narrative: str, text: str) -> str:\ndef generate_goal(model_name: LLM_Name, background: str) -> str:" }, { "identifier": "AgentAction", "path": "sotopia/messages/message_classes.py", "snippet": "class AgentAction(Message):\n action_type: ActionType = Field(\n description=\"whether to speak at this turn or choose to not do anything\"\n )\n argument: str = Field(\n description=\"the utterance if choose to speak, the expression or gesture if choose non-verbal communication, or the physical action if choose action\"\n )\n\n def to_natural_language(self) -> str:\n match self.action_type:\n case \"none\":\n return \"did nothing\"\n case \"speak\":\n return f'said: \"{self.argument}\"'\n case \"non-verbal communication\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"action\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"leave\":\n return \"left the conversation\"" }, { "identifier": "Message", "path": "sotopia/messages/message_classes.py", "snippet": "class Message(BaseModel):\n \"\"\"\n An interface for messages.\n There is only one required method: to_natural_language\n \"\"\"\n\n def to_natural_language(self) -> str:\n raise NotImplementedError" }, { "identifier": "Observation", "path": "sotopia/messages/message_classes.py", "snippet": "class Observation(Message):\n last_turn: str = Field(description=\"the last turn of the conversation\")\n turn_number: int = Field(description=\"the turn number of the conversation\")\n available_actions: list[ActionType] = Field(\n description=\"the available actions\"\n )\n\n def to_natural_language(self) -> str:\n if self.turn_number == 0:\n return f\"\\n{self.last_turn}\\nConversation Starts:\\n\"\n else:\n return f\"Turn #{self.turn_number-1}: {self.last_turn}\\n\"" }, { "identifier": "BaseSampler", "path": "sotopia/samplers/base_sampler.py", "snippet": "class BaseSampler(Generic[ObsType, ActType]):\n def __init__(\n self,\n env_candidates: Sequence[EnvironmentProfile | str] | None = None,\n agent_candidates: Sequence[AgentProfile | str] | None = None,\n ) -> None:\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 1,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:" }, { "identifier": "ConstraintBasedSampler", "path": "sotopia/samplers/constraint_based_sampler.py", "snippet": "class ConstraintBasedSampler(BaseSampler[ObsType, ActType]):\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 10,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:\n \"\"\"\n Sample an environment and a list of agents based on the constraints of the environment.\n\n Note: Sampling without replacement is 
only restricted to single env candidate.\n This is due to the fact that the number of possible combinations of env and agents is huge.\n Please sample for each env separately if you want to sample without replacement.\n \"\"\"\n assert (\n not isinstance(agent_classes, list)\n or len(agent_classes) == n_agent\n ), f\"agent_classes should be a list of length {n_agent} or a single agent class\"\n\n if not isinstance(agent_classes, list):\n agent_classes = [agent_classes] * n_agent\n assert (\n len(agents_params) == n_agent\n ), f\"agents_params should be a list of length {n_agent}\"\n\n env_profiles: list[EnvironmentProfile] = []\n agents_which_fit_scenario: list[list[str]] = []\n\n agent_candidate_ids: set[str] | None = None\n if self.agent_candidates:\n agent_candidate_ids = set(\n str(agent.pk) if not isinstance(agent, str) else agent\n for agent in self.agent_candidates\n )\n else:\n agent_candidate_ids = None\n\n if not replacement:\n assert self.env_candidates and len(self.env_candidates) == 1, (\n \"Sampling without replacement is only restricted to single env candidate (must be provided in the constructor). \"\n \"This is due to the fact that the number of possible combinations of env and agents is huge. \"\n \"Please sample for each env separately if you want to sample without replacement.\"\n )\n\n env_profile_id = (\n self.env_candidates[0].pk\n if not isinstance(self.env_candidates[0], str)\n else self.env_candidates[0]\n )\n\n assert env_profile_id, \"Env candidate must have an id\"\n\n agents_which_fit_scenario = _get_fit_agents_for_one_env(\n env_profile_id, agent_candidate_ids, size\n )\n env_profiles = (\n [EnvironmentProfile.get(env_profile_id)] * size\n if isinstance(self.env_candidates[0], str)\n else [self.env_candidates[0]] * size\n )\n else:\n for _ in range(size):\n if self.env_candidates:\n env_profile = random.choice(self.env_candidates)\n if isinstance(env_profile, str):\n env_profile = EnvironmentProfile.get(env_profile)\n else:\n env_profile_id = random.choice(\n list(EnvironmentProfile.all_pks())\n )\n env_profile = EnvironmentProfile.get(env_profile_id)\n env_profiles.append(env_profile)\n env_profile_id = env_profile.pk\n assert env_profile_id, \"Env candidate must have an id\"\n agents_which_fit_scenario.append(\n _get_fit_agents_for_one_env(\n env_profile_id, agent_candidate_ids, 1\n )[0]\n )\n\n assert (\n len(env_profiles) == size\n ), \"Number of env_profiles is not equal to size\"\n assert (\n len(agents_which_fit_scenario) == size\n ), \"Number of agents_which_fit_scenario is not equal to size\"\n\n for env_profile, agent_profile_id_list in zip(\n env_profiles, agents_which_fit_scenario\n ):\n env = ParallelSotopiaEnv(env_profile=env_profile, **env_params)\n agent_profiles = [\n AgentProfile.get(id) for id in agent_profile_id_list\n ]\n\n agents = [\n agent_class(agent_profile=agent_profile, **agent_params)\n for agent_class, agent_profile, agent_params in zip(\n agent_classes, agent_profiles, agents_params\n )\n ]\n # set goal for each agent\n for agent, goal in zip(agents, env.profile.agent_goals):\n agent.goal = goal\n\n yield env, agents" }, { "identifier": "run_async_server", "path": "sotopia/server.py", "snippet": "@gin.configurable\n@beartype\nasync def run_async_server(\n model_dict: dict[str, LLM_Name],\n sampler: BaseSampler[Observation, AgentAction] = BaseSampler(),\n action_order: Literal[\n \"simutaneous\", \"round-robin\", \"random\"\n ] = \"round-robin\",\n env_agent_combo_list: list[EnvAgentCombo[Observation, AgentAction]] = [],\n 
omniscient: bool = False,\n script_like: bool = False,\n json_in_script: bool = False,\n tag: str | None = None,\n push_to_db: bool = False,\n using_async: bool = True,\n) -> list[list[tuple[str, str, Message]]]:\n \"\"\"\n Doc incomplete\n\n Args:\n omniscient (bool): Whether the agent knows the goal of the other, default to False\n script_like (bool): Whether we generate the turn in script like manner, default to False\n json_in_script (bool): Whether we requires the script generator to return json (Only valid when script_like is True), default to False\n\n Note: env_agent_combo_list is optional. When it defaults to [], sampler is used\n else the sampler is not used. Please pass in BaseSampler or simply not specify it when using this option.\n \"\"\"\n\n assert not (\n push_to_db and tag is None\n ), \"please provide a tag when push to db\"\n\n # Create Environment and agents\n # This step will be moved to outside this function\n\n env_params = {\n \"model_name\": model_dict[\"env\"],\n \"action_order\": action_order,\n \"evaluators\": [\n RuleBasedTerminatedEvaluator(max_turn_number=20, max_stale_turn=2),\n ],\n \"terminal_evaluators\": [\n ReachGoalLLMEvaluator(model_dict[\"env\"]),\n ],\n }\n agents_model_dict = {\n \"agent1\": model_dict[\"agent1\"],\n \"agent2\": model_dict[\"agent2\"],\n }\n\n def get_agent_class(\n model_name: str,\n ) -> Type[BaseAgent[Observation, AgentAction]]:\n if model_name == \"human\":\n return HumanAgent\n elif script_like and not json_in_script:\n return ScriptWritingAgent\n else:\n return LLMAgent\n\n if env_agent_combo_list:\n assert (\n type(sampler) is BaseSampler\n ), \"No sampler should be used when `env_agent_combo_list` is empty\"\n env_agent_combo_iter = iter(env_agent_combo_list)\n else:\n env_agent_combo_iter = sampler.sample(\n agent_classes=[\n get_agent_class(model_name)\n for model_name in agents_model_dict.values()\n ],\n n_agent=len(agents_model_dict),\n env_params=env_params,\n agents_params=[\n {\"model_name\": model_name} if model_name != \"human\" else {}\n for model_name in agents_model_dict.values()\n ],\n )\n episode_futures = [\n arun_one_episode(\n env=env_agent_combo[0],\n agent_list=env_agent_combo[1],\n model_dict=model_dict,\n omniscient=omniscient,\n script_like=script_like,\n json_in_script=json_in_script,\n tag=tag,\n push_to_db=push_to_db,\n )\n for env_agent_combo in env_agent_combo_iter\n ]\n\n batch_results = (\n await tqdm_asyncio.gather(*episode_futures, desc=\"Running one batch\")\n if using_async\n else [await i for i in episode_futures]\n )\n\n return cast(list[list[tuple[str, str, Message]]], batch_results)" }, { "identifier": "parse_gin_flags", "path": "sotopia_conf/gin_utils.py", "snippet": "def parse_gin_flags(\n gin_search_paths: Sequence[str],\n gin_files: Sequence[str],\n gin_bindings: Sequence[str],\n skip_unknown: Union[bool, Sequence[str]] = False,\n finalize_config: bool = True,\n) -> None:\n \"\"\"Parses provided gin files override params.\n Args:\n gin_search_paths: paths that will be searched for gin files.\n gin_files: paths to gin config files to be parsed. Files will be parsed in\n order with conflicting settings being overriden by later files. Paths may\n be relative to paths in `gin_search_paths`.\n gin_bindings: individual gin bindings to be applied after the gin files are\n parsed. Will be applied in order with conflicting settings being overriden\n by later oens.\n skip_unknown: whether to ignore unknown bindings or raise an error (default\n behavior). 
Alternatively, a list of configurable names to skip if unknown.\n finalize_config: whether to finalize the config so that it cannot be\n modified (default behavior).\n \"\"\"\n # Register .gin file search paths with gin\n for gin_file_path in gin_search_paths:\n gin.add_config_file_search_path(gin_file_path)\n\n # Parse config files and bindings passed via flag.\n gin.parse_config_files_and_bindings(\n gin_files,\n gin_bindings,\n skip_unknown=skip_unknown,\n finalize_config=finalize_config,\n )\n logging.info(\"Gin Configuration:\")\n for line in gin.config_str().splitlines():\n logging.info(\"%s\", line)" }, { "identifier": "run", "path": "sotopia_conf/gin_utils.py", "snippet": "def run(main: Any) -> None:\n \"\"\"Wrapper for app.run that rewrites gin args before parsing.\"\"\"\n app.run(\n main,\n flags_parser=lambda args: app.parse_flags_with_usage(\n rewrite_gin_args(args)\n ),\n )" } ]
import asyncio
import logging
import os
import subprocess
import sys
import gin
from datetime import datetime
from logging import FileHandler
from typing import Any, Callable, Generator, Literal, Sequence, cast
from absl import app, flags
from rich import print
from rich.logging import RichHandler
from tqdm import tqdm
from sotopia.agents import LLMAgent
from sotopia.database import (
    AgentProfile,
    EnvAgentComboStorage,
    EnvironmentProfile,
    EpisodeLog,
)
from sotopia.envs.evaluators import (
    ReachGoalLLMEvaluator,
    RuleBasedTerminatedEvaluator,
)
from sotopia.envs.parallel import ParallelSotopiaEnv
from sotopia.generation_utils.generate import LLM_Name
from sotopia.messages import AgentAction, Message, Observation
from sotopia.samplers import (
    BaseSampler,
    ConstraintBasedSampler,
    EnvAgentCombo,
)
from sotopia.server import run_async_server
from sotopia_conf.gin_utils import parse_gin_flags, run
12,663
_DEFAULT_GIN_SEARCH_PATHS = [ os.path.dirname(os.path.dirname(os.path.abspath(__file__))) ] FLAGS = flags.FLAGS # date and message only FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s" process = subprocess.Popen( ["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE ) git_head_hash = process.communicate()[0].strip() logging.basicConfig( level=15, format=FORMAT, datefmt="[%X]", handlers=[ RichHandler(), FileHandler( datetime.now().strftime( f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log" ) ), ], ) env_ids: list[str] = list(EnvironmentProfile.all_pks()) assert all( isinstance(env_id, str) for env_id in env_ids ), "env_ids should be a list of strings" def check_existing_episodes( env_id: str, agent_ids: list[str], models: dict[str, LLM_Name], tag: str | None = None, ) -> bool: if tag: existing_episode = EpisodeLog.find( (EpisodeLog.environment == env_id) & (EpisodeLog.tag == tag) ).all() else: existing_episode = EpisodeLog.find( EpisodeLog.environment == env_id ).all() if existing_episode: for episode in existing_episode: assert isinstance( episode, EpisodeLog ), "episode should be an EpisodeLog" if episode.agents == agent_ids and episode.models == list( models.values() ): return True return False else: return False def _sample_env_agent_combo_and_push_to_db(env_id: str) -> None: sampler = ConstraintBasedSampler[Observation, AgentAction]( env_candidates=[env_id] ) env_agent_combo_list = list( sampler.sample(agent_classes=[LLMAgent] * 2, replacement=False) ) for env, agent in env_agent_combo_list: EnvAgentComboStorage( env_id=env.profile.pk, agent_ids=[agent[0].profile.pk, agent[1].profile.pk], ).save() @gin.configurable def _iterate_env_agent_combo_not_in_db( model_names: dict[str, LLM_Name], env_ids: list[str] = [], tag: str | None = None,
_DEFAULT_GIN_SEARCH_PATHS = [ os.path.dirname(os.path.dirname(os.path.abspath(__file__))) ] FLAGS = flags.FLAGS # date and message only FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s" process = subprocess.Popen( ["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE ) git_head_hash = process.communicate()[0].strip() logging.basicConfig( level=15, format=FORMAT, datefmt="[%X]", handlers=[ RichHandler(), FileHandler( datetime.now().strftime( f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log" ) ), ], ) env_ids: list[str] = list(EnvironmentProfile.all_pks()) assert all( isinstance(env_id, str) for env_id in env_ids ), "env_ids should be a list of strings" def check_existing_episodes( env_id: str, agent_ids: list[str], models: dict[str, LLM_Name], tag: str | None = None, ) -> bool: if tag: existing_episode = EpisodeLog.find( (EpisodeLog.environment == env_id) & (EpisodeLog.tag == tag) ).all() else: existing_episode = EpisodeLog.find( EpisodeLog.environment == env_id ).all() if existing_episode: for episode in existing_episode: assert isinstance( episode, EpisodeLog ), "episode should be an EpisodeLog" if episode.agents == agent_ids and episode.models == list( models.values() ): return True return False else: return False def _sample_env_agent_combo_and_push_to_db(env_id: str) -> None: sampler = ConstraintBasedSampler[Observation, AgentAction]( env_candidates=[env_id] ) env_agent_combo_list = list( sampler.sample(agent_classes=[LLMAgent] * 2, replacement=False) ) for env, agent in env_agent_combo_list: EnvAgentComboStorage( env_id=env.profile.pk, agent_ids=[agent[0].profile.pk, agent[1].profile.pk], ).save() @gin.configurable def _iterate_env_agent_combo_not_in_db( model_names: dict[str, LLM_Name], env_ids: list[str] = [], tag: str | None = None,
) -> Generator[EnvAgentCombo[Observation, AgentAction], None, None]:
12
2023-10-23 19:47:26+00:00
16k
uukuguy/multi_loras
multi_loras/slora/router/manager.py
[ { "identifier": "SamplingParams", "path": "multi_loras/slora/sampling_params.py", "snippet": "class SamplingParams:\n\n def __init__(\n self,\n do_sample: bool = False,\n presence_penalty: float = 0.0,\n frequency_penalty: float = 0.0,\n temperature: float = 1.0,\n top_p: float = 1.0,\n top_k: int = -1, # -1 is for all \n ignore_eos: bool = False,\n max_new_tokens: int = 16,\n stop_sequences: Optional[Union[str, List[str]]] = None # 停止句子条件\n ) -> None:\n self.do_sample = do_sample\n self.presence_penalty = presence_penalty\n self.frequency_penalty = frequency_penalty\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.ignore_eos = ignore_eos\n self.max_new_tokens = max_new_tokens\n self.stop_sequences = stop_sequences\n if self.do_sample == False:\n self.temperature = 1.0\n self.top_p = 1.0\n self.top_k = 1\n if self.temperature >= 0.0 and self.temperature < _SAMPLING_EPS: # temperature is too slow, change to greedy search\n self.temperature = 1.0\n self.top_k = 1\n return\n \n def verify(self):\n if self.presence_penalty < 0.0:\n raise ValueError(f\"presence_penalty must >= 0.0, got {self.presence_penalty}\")\n if self.frequency_penalty < 0.0:\n raise ValueError(f\"frequency_penalty must >= 0.0, got {self.frequency_penalty}\")\n if self.temperature <= 0.0:\n raise ValueError(f\"temperature must > 0.0, got {self.temperature}\")\n if self.top_p <= 0.0 or self.top_p > 1.0:\n raise ValueError(f\"top_p must in (0.0, 1.0], got {self.top_p}\")\n if self.top_k < -1 or self.top_k == 0:\n raise ValueError(f\"top_k must be -1 (disable), or at least 1, got {self.top_k}.\")\n if self.max_new_tokens < 1:\n raise ValueError(f\"max_new_tokens must be at least 1 , got {self.max_new_tokens}.\")\n return\n\n def stop_sentences_to_token_ids(self, tokenizer):\n if self.stop_sequences is None:\n self.stop_sequences = []\n else:\n if isinstance(self.stop_sequences, str):\n self.stop_sequences = [self.stop_sequences]\n new_stop_sequences = []\n for stop_str in self.stop_sequences:\n stop_str_ids = tokenizer.encode(stop_str)\n if stop_str_ids is not None and len(stop_str_ids) >= 1: # remove bos_token_id\n stop_str_ids = stop_str_ids[1:]\n if len(stop_str_ids) > 0:\n new_stop_sequences.append(stop_str_ids)\n self.stop_sequences = new_stop_sequences\n return\n \n def to_dict(self):\n ret = {}\n ret[\"do_sample\"] = self.do_sample\n ret[\"presence_penalty\"] = self.presence_penalty\n ret[\"frequency_penalty\"] = self.frequency_penalty\n ret[\"temperature\"] = self.temperature\n ret[\"top_p\"] = self.top_p\n ret[\"top_k\"] = self.top_k\n # if self.ignore_eos is not None:\n # ret[\"ignore_eos\"] = self.ignore_eos\n # if self.max_tokens is not None:\n # ret[\"max_tokens\"] = self.max_tokens\n return ret" }, { "identifier": "Req", "path": "multi_loras/slora/io_struct.py", "snippet": "class Req:\n def __init__(self, adapter_dir, request_id, prompt_ids, sample_params: SamplingParams):\n self.adapter_dir = adapter_dir\n self.request_id = request_id\n self.prompt_ids = prompt_ids\n self.input_len = len(prompt_ids)\n self.max_output_len = sample_params.max_new_tokens\n self.sample_params = sample_params\n self.output_ids = []\n self.output_metadata_list = []\n self.has_generate_finished = False\n self.aborted = False\n\n def to_rpc_obj(self):\n return {\"adapter_dir\": self.adapter_dir,\n \"request_id\": self.request_id,\n \"input_id\": self.prompt_ids,\n \"output_len\": self.max_output_len,\n \"sampling_param\": self.sample_params.to_dict() }\n\n def to_req_detokenization_state(self):\n out = 
ReqDetokenizationState(self.request_id, self.prompt_ids, self.max_output_len, self.sample_params.ignore_eos)\n if self.output_metadata_list:\n out.gen_metadata.update(self.output_metadata_list[-1])\n return out\n \n def stop_sequences_matched(self):\n for stop_token_ids in self.sample_params.stop_sequences:\n stop_len = len(stop_token_ids)\n if stop_len > 0:\n if len(self.output_ids) >= stop_len:\n if all(self.output_ids[-(stop_len - i)] == stop_token_ids[i] for i in range(stop_len)):\n return True\n return False\n\n def __repr__(self):\n return (f\"request_id(n={self.request_id}, \"\n f\"adapter_dir={self.adapter_dir}, \"\n f\"prompt_ids={self.prompt_ids}, \")" }, { "identifier": "Batch", "path": "multi_loras/slora/io_struct.py", "snippet": "class Batch:\n def __init__(self, batch_id, reqs: List[Req]):\n self.batch_id = batch_id\n self.reqs = reqs\n self.id_to_reqs = {req.request_id: req for req in reqs}\n\n self.adapter_dirs = set()\n for req in reqs:\n self.adapter_dirs.add(req.adapter_dir)\n\n def input_tokens(self):\n batch_input_tokens = 0\n for req in self.reqs:\n batch_input_tokens += req.input_len\n return batch_input_tokens\n\n def calcu_max_tokens(self):\n tokens = 0\n for req in self.reqs:\n tokens += req.input_len + req.max_output_len\n return tokens\n \n def calcu_used_tokens(self):\n tokens = 0\n for req in self.reqs:\n tokens += req.input_len + len(req.output_ids)\n return tokens\n\n def mark_finished_req(self, eos_id):\n has_new_finish = False\n for req in self.reqs:\n if req.stop_sequences_matched():\n req.has_generate_finished = True\n has_new_finish = True\n if req.output_ids[-1] == eos_id and req.sample_params.ignore_eos == False:\n req.has_generate_finished = True\n has_new_finish = True\n if len(req.output_ids) >= req.max_output_len or req.aborted:\n req.has_generate_finished = True\n has_new_finish = True\n return has_new_finish\n\n def filter_finished(self):\n unfinished_req = []\n for req in self.reqs:\n if not req.has_generate_finished:\n unfinished_req.append(req)\n self.reqs = unfinished_req\n self.id_to_reqs = {req.request_id: req for req in self.reqs}\n\n self.adapter_dirs = set()\n for req in self.reqs:\n self.adapter_dirs.add(req.adapter_dir)\n\n def is_clear(self):\n return len(self.reqs) == 0\n\n def merge(self, mini_batch):\n for _req in mini_batch.reqs:\n self.reqs.append(_req)\n self.adapter_dirs.add(_req.adapter_dir)\n self.id_to_reqs = {req.request_id: req for req in self.reqs}\n return\n\n def __repr__(self):\n return (f\"batch_id={self.batch_id}, \"\n # f\"reqs={self.reqs}, \"\n f\"req_ids={self.id_to_reqs.keys()}\")" }, { "identifier": "BatchAbortReq", "path": "multi_loras/slora/io_struct.py", "snippet": "class BatchAbortReq:\n def __init__(self, req_ids):\n self.reqs: List[str] = req_ids" }, { "identifier": "BatchTokenIdOut", "path": "multi_loras/slora/io_struct.py", "snippet": "class BatchTokenIdOut:\n def __init__(self):\n self.reqs_infs: List[Tuple[str, int, Dict, bool, bool]] = [] # [req_id, new_token_id, gen_metadata, finished_state, abort_state]" }, { "identifier": "AbortReq", "path": "multi_loras/slora/io_struct.py", "snippet": "class AbortReq:\n def __init__(self, req_id):\n self.req_id = req_id" }, { "identifier": "InputParams", "path": "multi_loras/slora/router/input_params.py", "snippet": "class InputParams:\n\n def __init__(\n self,\n max_req_total_len,\n # kv cache manager parameters\n max_total_token_num,\n pool_size_lora,\n batch_max_tokens,\n running_max_req_size,\n # mem_ratio,\n # adapter_ratio,\n # heuristic\n swap,\n 
prefetch,\n prefetch_size,\n scheduler,\n profile,\n batch_num_adapters,\n enable_abort,\n # kernel,\n # # debug\n dummy,\n no_lora_compute,\n no_lora_swap,\n # no_lora_copy,\n no_kernel,\n no_mem_pool,\n bmm,\n ) -> None:\n self.max_req_total_len = max_req_total_len\n self.max_total_token_num = max_total_token_num\n self.pool_size_lora = pool_size_lora\n self.batch_max_tokens = batch_max_tokens\n self.running_max_req_size = running_max_req_size\n # self.mem_ratio = mem_ratio\n # self.adapter_ratio = adapter_ratio\n\n self.swap = swap\n self.prefetch = prefetch\n self.prefetch_size = prefetch_size\n self.scheduler = scheduler\n self.profile = profile\n self.batch_num_adapters = batch_num_adapters\n self.enable_abort = enable_abort\n # self.kernel = kernel\n\n self.dummy = dummy\n self.no_lora_compute = no_lora_compute\n self.no_lora_swap = no_lora_swap\n # self.no_lora_copy = no_lora_copy\n self.no_kernel = no_kernel\n self.no_mem_pool = no_mem_pool\n self.bmm = bmm\n return" }, { "identifier": "start_model_process", "path": "multi_loras/slora/router/model_infer/model_rpc.py", "snippet": "async def start_model_process(port, world_size):\n # 单卡时不使用 rpc\n if world_size == 1:\n return ModelRpcClient(ModelRpcServer(), world_size)\n \n import multiprocessing\n proc = multiprocessing.Process(target=_init_env, args=(port,))\n proc.start()\n await asyncio.sleep(2)\n repeat_count = 0\n while repeat_count < 20:\n try:\n con = rpyc.connect(\"localhost\", port, config={\"allow_pickle\": True})\n break\n except BaseException:\n await asyncio.sleep(1)\n repeat_count += 1\n if repeat_count == 20:\n raise Exception(\"init rpc env error!\")\n\n assert proc.is_alive()\n return ModelRpcClient(con.root, world_size, rpc_server_process=proc)" }, { "identifier": "ModelRpcClient", "path": "multi_loras/slora/router/model_infer/model_rpc.py", "snippet": "class ModelRpcClient:\n def __init__(self, model_rpc, world_size, rpc_server_process=None):\n self.model: ModelRpcServer = model_rpc\n self.world_size = world_size\n self.rpc_server_process = rpc_server_process\n self.use_rpc = self.world_size != 1\n if self.use_rpc:\n def async_wrap(f):\n f = rpyc.async_(f)\n async def _func(*args, **kwargs):\n ans = f(*args, **kwargs)\n await asyncio.to_thread(ans.wait)\n # raise if exception\n return ans.value\n return _func\n self._init_model = async_wrap(self.model.init_model)\n self._load_adapters = rpyc.async_(self.model.load_adapters)\n self._offload_adapters = rpyc.async_(self.model.offload_adapters)\n self._unmerge_adapter = rpyc.async_(self.model.unmerge_adapter)\n self._merge_adapter = rpyc.async_(self.model.merge_adapter)\n self._add_batch = async_wrap(self.model.add_batch)\n self._prefill_batch = async_wrap(self.model.prefill_batch)\n self._decode_batch = async_wrap(self.model.decode_batch)\n self._filter_batch = async_wrap(self.model.filter_batch)\n self._merge_batch = async_wrap(self.model.merge_batch)\n self._remove_batch = async_wrap(self.model.remove_batch)\n self._profile_prefill = async_wrap(self.model.profile_prefill)\n else:\n self._init_model = self.model.exposed_init_model\n self._load_adapters = self.model.exposed_load_adapters\n self._offload_adapters = self.model.exposed_offload_adapters\n self._merge_adapter = self.model.exposed_merge_adapter\n self._unmerge_adapter = self.model.exposed_unmerge_adapter\n self._add_batch = self.model.exposed_add_batch\n self._prefill_batch = self.model.exposed_prefill_batch\n self._decode_batch = self.model.exposed_decode_batch\n self._filter_batch = 
self.model.exposed_filter_batch\n self._merge_batch = self.model.exposed_merge_batch\n self._remove_batch = self.model.exposed_remove_batch\n self._profile_prefill = self.model.exposed_profile_prefill\n return\n\n async def init_model(self, rank_id, world_size, weight_dir, adapter_dirs,\n max_total_token_num, load_way, mode, input_params,\n\t\t\t prefetch_stream):\n ans : rpyc.AsyncResult = self._init_model(rank_id, world_size, weight_dir, adapter_dirs,\n max_total_token_num, load_way, mode, input_params,\n\t\t\t\t\t\t prefetch_stream)\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n\n async def load_adapters(self, reqs, prefetch=False):\n self._load_adapters(reqs, prefetch=prefetch)\n\n\n async def offload_adapters(self, reserved_reqs=None, prefetch=False):\n self._offload_adapters(reserved_reqs, prefetch=prefetch)\n \n async def unmerge_adapter(self):\n self._unmerge_adapter()\n \n async def merge_adapter(self):\n self._merge_adapter()\n\n\n async def init_batch(self, batch_id, reqs):\n ans = self._add_batch(batch_id, reqs, \"fp16\")\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n async def prefill_batch(self, batch_id):\n ans = self._prefill_batch(batch_id)\n if self.use_rpc:\n return await ans\n else:\n return ans\n\n async def decode_batch(self, batch_id):\n ans = self._decode_batch(batch_id)\n if self.use_rpc:\n return await ans\n else:\n return ans\n\n async def filter_batch(self, batch_id, req_id_list):\n ans = self._filter_batch(batch_id, req_id_list)\n if self.use_rpc:\n await ans\n return\n else:\n return \n\n async def merge_batch(self, batch_id1, batch_id2):\n ans = self._merge_batch(batch_id1, batch_id2)\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n async def remove_batch(self, batch_id):\n ans = self._remove_batch(batch_id)\n if self.use_rpc:\n await ans\n return\n else:\n return\n \n async def profile_prefill(self):\n ans = self._profile_prefill()\n if self.use_rpc:\n return await ans\n else:\n return ans" }, { "identifier": "ReqQueue", "path": "multi_loras/slora/router/req_queue.py", "snippet": "class ReqQueue:\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n self.max_total_tokens = max_total_tokens\n assert batch_max_tokens is not None\n self.batch_max_tokens = batch_max_tokens\n self.running_max_req_size = running_max_req_size\n self.waiting_req_list: List[Req] = []\n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in 
self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "Stats", "path": "multi_loras/slora/router/stats.py", "snippet": "class Stats:\n\n def __init__(self, log_status, log_stats_interval) -> None:\n self.log_stats = log_status\n self.log_stats_interval = log_stats_interval\n self.last_log_time = time.time()\n self.all_tokens = 0\n self.output_tokens = 0\n self.prompt_tokens = 0\n return\n \n def count_prompt_tokens(self, run_batch):\n if self.log_stats:\n tokens = run_batch.input_tokens()\n self.prompt_tokens += tokens\n self.all_tokens += tokens\n return\n \n def count_output_tokens(self, run_batch):\n if self.log_stats:\n tokens = len(run_batch.reqs)\n self.output_tokens += tokens\n self.all_tokens += tokens\n return\n\n def print_stats(self):\n if not self.log_stats:\n return\n\n now = time.time()\n if now - self.last_log_time > self.log_stats_interval:\n print(f\"Avg tokens(prompt+generate) throughput: {self.all_tokens/(now-self.last_log_time):8.3f} tokens/s\\n\"\n f\"Avg prompt tokens throughput: {self.prompt_tokens/(now-self.last_log_time):8.3f} tokens/s\\n\"\n f\"Avg generate tokens throughput: {self.output_tokens/(now-self.last_log_time):8.3f} tokens/s\")\n self.all_tokens = 0\n self.output_tokens = 0\n self.prompt_tokens = 0\n self.last_log_time = now\n return" }, { "identifier": "AlphaModel", "path": "multi_loras/slora/router/profiler.py", "snippet": "class AlphaModel:\n def __init__(self, profiling_results) -> None:\n self.base_prefill = profiling_results[0]\n print(self.base_prefill)\n \n # load from .pkl file\n @classmethod\n def from_file(cls, file_path):\n with open(file_path, \"rb\") as f:\n results = pickle.load(f)\n return cls(results)\n\n def get_latency(self, batch_size, seq_len):\n seq_len = math.ceil(seq_len / 32) * 32\n assert seq_len <= 1024\n if batch_size == 0: return 0\n # assert batch_size in self.base_prefill\n if batch_size in self.base_prefill:\n return self.base_prefill[batch_size][seq_len]\n elif batch_size == 1 and 
2 in self.base_prefill:\n return self.base_prefill[2][seq_len]\n elif batch_size % 2 != 0 and batch_size - 1 in self.base_prefill and batch_size + 1 in self.base_prefill:\n return (self.base_prefill[batch_size - 1][seq_len] + self.base_prefill[batch_size + 1][seq_len]) / 2\n else:\n return np.Inf" }, { "identifier": "BetaModel", "path": "multi_loras/slora/router/profiler.py", "snippet": "class BetaModel:\n def __init__(self, profiling_results) -> None:\n self.base_prefill = profiling_results[0]\n self.adapter_prefill = profiling_results[1]\n print(self.adapter_prefill)\n \n # load from .pkl file\n @classmethod\n def from_file(cls, file_path):\n with open(file_path, \"rb\") as f:\n results = pickle.load(f)\n return cls(results)\n \n def get_latency(self, rank_size, batch_size, seq_len):\n if rank_size == 0: return 0\n seq_len = math.ceil(seq_len / 32) * 32\n assert seq_len <= 1024\n if batch_size == 0: return 0\n # assert batch_size in self.base_prefill\n if batch_size in self.base_prefill:\n return self.adapter_prefill[rank_size][batch_size][seq_len] - self.base_prefill[batch_size][seq_len]\n elif batch_size == 1 and 2 in self.base_prefill:\n return self.adapter_prefill[rank_size][2][seq_len] - self.base_prefill[2][seq_len]\n elif batch_size % 2 != 0 and batch_size - 1 in self.base_prefill and batch_size + 1 in self.base_prefill:\n a = self.adapter_prefill[rank_size][batch_size - 1][seq_len] - self.base_prefill[batch_size - 1][seq_len]\n b = self.adapter_prefill[rank_size][batch_size + 1][seq_len] - self.base_prefill[batch_size + 1][seq_len]\n return (a + b) / 2\n else:\n return np.Inf" }, { "identifier": "PETSReqQueue", "path": "multi_loras/slora/router/pets_req_queue.py", "snippet": "class PETSReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.alpha = None\n self.beta = None # will be set automatically in the profiling function in router.manager\n \n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n def intra_task_batching(self, lora_ranks):\n ## Preprocessing: gather the queries with the same adapter.\n clustered_queries_by_adapter = {}\n for query in self.waiting_req_list:\n adapter_dir = query.adapter_dir\n if adapter_dir in clustered_queries_by_adapter:\n clustered_queries_by_adapter[adapter_dir].append(query)\n else:\n clustered_queries_by_adapter[adapter_dir] = [query]\n\n ## DP\n mini_batches = []\n for adapter_dir, queries in clustered_queries_by_adapter.items():\n state_1st_stage = []\n split_idx_list = []\n\n ### Sort queries according to the sequence length in ascending order.\n queries = sorted(queries, key=lambda x: x.input_len)\n queries.insert(0, None) # Sentinel.\n\n ### Initialize.\n state_1st_stage.append(0)\n split_idx_list.append(0)\n for j in range(1, len(queries)):\n min_cost = np.Inf # INF\n split_idx = 0\n for k in range(1, j+1):\n lora_rank = lora_ranks[adapter_dir]\n 
tmp = state_1st_stage[k-1] + self.beta.get_latency(lora_rank, j-k+1, queries[j].input_len)\n if tmp < min_cost:\n min_cost = tmp\n split_idx = k-1\n split_idx_list.append(split_idx)\n state_1st_stage.append(min_cost)\n \n ### Split queries into mini-batches according to split_idx_list.\n \n end_idx = len(queries) - 1\n\n while(end_idx > 0):\n start_idx = split_idx_list[end_idx] + 1\n mini_batch = []\n max_len = queries[end_idx].input_len\n for j in range(start_idx, end_idx + 1):\n mini_batch.append(queries[j]) \n mini_batches.append((mini_batch, max_len))\n end_idx = split_idx_list[end_idx] \n \n return mini_batches\n \n # Inter-task batching.\n def inter_task_batching(self, mini_batches):\n ## Sort mini_batches according to the max sequence length.\n mini_batches = sorted(mini_batches, key=lambda x: x[1])\n mini_batches.insert(0, None) # Sentinel.\n\n tmp = 0\n mini_batch_sum = [0]\n for mini_batch in mini_batches[1:]:\n tmp += len(mini_batch[0])\n mini_batch_sum.append(tmp)\n\n ## DP.\n state_2nd_stage = []\n split_idx_list = []\n state_2nd_stage.append(0)\n split_idx_list.append(0)\n\n for i in range(1, len(mini_batches)):\n min_cost = np.Inf # INF\n split_idx = 0\n for j in range(1, i+1):\n total_samples = mini_batch_sum[i] - mini_batch_sum[j-1]\n tmp = state_2nd_stage[j-1] + self.alpha.get_latency(total_samples, mini_batches[i][1])\n if tmp < min_cost:\n min_cost = tmp\n split_idx = j - 1\n split_idx_list.append(split_idx)\n state_2nd_stage.append(min_cost)\n\n ## Split mini_batches into final scheduled_batches.\n ### Split mini_batches into macro_batches.\n\n end_idx = len(mini_batches) - 1\n macro_batches = []\n while(end_idx > 0):\n start_idx = split_idx_list[end_idx] + 1\n macro_batch = []\n max_len = mini_batches[end_idx][1]\n for j in range(start_idx, end_idx + 1):\n macro_batch.append(mini_batches[j]) \n macro_batches.append((macro_batch, max_len))\n end_idx = split_idx_list[end_idx] \n\n total_samples = 0\n for macro_batch in macro_batches:\n for mini_batch in macro_batch[0]:\n total_samples += len(mini_batch[0])\n # print(total_samples)\n\n return macro_batches\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n reqs = self.waiting_req_list\n # when waiting_reqs > 20\n if len(self.waiting_req_list) > 10:\n mini_batches = self.intra_task_batching(lora_ranks)\n macro_batches = self.inter_task_batching(mini_batches)\n \n macro_batch = macro_batches[-1][0]\n reqs = [req for minibatch in macro_batch for req in minibatch[0]]\n \n \n 
self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n for req in reqs:\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "PEFTReqQueue", "path": "multi_loras/slora/router/peft_req_queue.py", "snippet": "class PEFTReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n if len(self.waiting_req_list) > 0 and current_batch is None:\n adapter_dir = self.waiting_req_list[0].adapter_dir\n if current_batch is not None:\n adapter_dir = current_batch.reqs[0].adapter_dir\n # heuristics:\n # TODO: think more\n max_other_waited_reqs = 30\n other_waited_reqs = 0\n for req in 
self.waiting_req_list:\n if req.adapter_dir != adapter_dir:\n other_waited_reqs += 1\n if other_waited_reqs > max_other_waited_reqs:\n return None\n continue\n if req.adapter_dir == adapter_dir:\n break\n\n for req in self.waiting_req_list:\n if req.adapter_dir != adapter_dir:\n continue\n if req.aborted:\n aborted_count += 1\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "ClusterReqQueue", "path": "multi_loras/slora/router/cluster_req_queue.py", "snippet": "class ClusterReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size, batch_num_adapters) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.batch_num_adapters = batch_num_adapters\n\n def _generate_new_batch_prioritizing_existing_adapters(self, current_batch:Batch, lora_ranks: dict[str, int]):\n filtered_waiting_req_list = list(filter(lambda req: req.adapter_dir in current_batch.adapter_dirs, self.waiting_req_list))\n request_ids_to_remove_from_waiting_queue = set()\n can_run_list = []\n new_batch_total_tokens = 0\n for idx, req in enumerate(filtered_waiting_req_list):\n if req.aborted:\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n else:\n break\n \n self.waiting_req_list = list(filter(lambda req: req.request_id not in request_ids_to_remove_from_waiting_queue, self.waiting_req_list))\n request_ids_to_remove_from_waiting_queue = set()\n\n # If filtered waiting list was not enough to max-out the current running batch, we resolve to FIFO\n for req in self.waiting_req_list:\n if req.aborted:\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n continue\n\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n else:\n break\n \n self.waiting_req_list = list(filter(lambda req: req.request_id not in request_ids_to_remove_from_waiting_queue, self.waiting_req_list))\n\n return can_run_list\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n\n for req in self.waiting_req_list:\n if req.aborted:\n 
aborted_count += 1\n continue\n\n if current_batch is not None and len(current_batch.adapter_dirs) >= self.batch_num_adapters:\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n rest_of_batch = self._generate_new_batch_prioritizing_existing_adapters(current_batch, lora_ranks)\n can_run_list += rest_of_batch\n if len(can_run_list) != 0:\n return Batch(uuid.uuid4().hex, can_run_list)\n else:\n return None\n\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n return new_batch\n else:\n return None" }, { "identifier": "AbortReqQueue", "path": "multi_loras/slora/router/abort_req_queue.py", "snippet": "class AbortReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.abort_req_list: List[str] = []\n self.req_time_stamp = []\n self.init_bs = 1\n self.apprx_req_rate = 1\n self.apprx_bs = self.init_bs\n self.last_req_num = 0\n self.last_time = time.time()\n \n def append(self, req):\n self.waiting_req_list.insert(0, req)\n self.req_time_stamp.insert(0, time.time())\n assert len(self.waiting_req_list) == len(self.req_time_stamp)\n return\n\n def reset_abort_list(self):\n self.abort_req_list = []\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n\n self.apprx_req_rate = int(0.7 * (len(self.waiting_req_list) - self.last_req_num) + 0.3 * self.apprx_req_rate)\n for i, req in enumerate(self.waiting_req_list):\n if attainment_func(time.time() - self.req_time_stamp[i] + 0.5) == 0:\n req.aborted = True\n aborted_count += 1\n abort_list.append(req)\n self.abort_req_list.append(req.request_id)\n self.req_time_stamp = [self.req_time_stamp[i] for i in range(len(self.req_time_stamp)) if self.waiting_req_list[i] not in abort_list]\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in abort_list]\n \n if self.apprx_req_rate >= self.apprx_bs:\n print(\"apprx bs\", self.apprx_bs, \"req rate\", self.apprx_req_rate)\n # choose from the latest requests\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n elif self.apprx_req_rate < self.apprx_bs:\n # choose from the earliest requests\n for req in reversed(self.waiting_req_list):\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n \n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.req_time_stamp = [self.req_time_stamp[i] for i in range(len(self.req_time_stamp)) if 
self.waiting_req_list[i] not in can_run_list and self.waiting_req_list[i] not in abort_list]\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n self.last_req_num = len(self.waiting_req_list)\n self.apprx_bs = max(int(0.7 * len(new_batch.reqs) + 0.3 * self.apprx_bs), self.init_bs)\n return new_batch\n else:\n return None" }, { "identifier": "get_lora_config", "path": "multi_loras/slora/models/peft/lora_adapter.py", "snippet": "def get_lora_config(lora_dir, dummy):\n if dummy:\n return get_lora_config_json(lora_dir), lora_dir\n else:\n lora_dir = re.sub(r'-(\\d+)$', '', lora_dir)\n return hf_load_config(lora_dir)" } ]
import uvloop
import asyncio
import os
import pickle
import time
import torch
import zmq
import zmq.asyncio
import traceback
from typing import Dict, List, Optional
from rpyc.utils.classic import obtain
from slora.utils.infer_utils import calculate_time
from ..sampling_params import SamplingParams
from ..io_struct import Req, Batch, BatchAbortReq, BatchTokenIdOut, AbortReq
from .input_params import InputParams
from .model_infer.model_rpc import start_model_process, ModelRpcClient
from .req_queue import ReqQueue
from .stats import Stats
from .profiler import AlphaModel, BetaModel
from .pets_req_queue import PETSReqQueue
from .peft_req_queue import PEFTReqQueue
from .cluster_req_queue import ClusterReqQueue
from .abort_req_queue import AbortReqQueue
from ..models.peft.lora_adapter import get_lora_config
12,422
if input_params.scheduler == "pets": self.req_queue = PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": self.req_queue = PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None: self.req_queue = ClusterReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, input_params.batch_num_adapters) elif input_params.enable_abort: self.req_queue = AbortReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) else: self.req_queue = ReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) self.running_batch: Batch = None self.eos_id = eos_id self.has_wait_tokens = 0 self.max_wait_tokens = 10 context = zmq.asyncio.Context(2) self.recv_from_httpserver = context.socket(zmq.PULL) self.recv_from_httpserver.bind(f"tcp://127.0.0.1:{router_port}") self.send_to_detokenization = context.socket(zmq.PUSH) self.send_to_detokenization.connect(f"tcp://127.0.0.1:{detokenization_port}") self.model_rpc_ports = model_rpc_ports self.stats_tool = Stats(log_stats, log_stats_interval) async def wait_to_model_ready(self): self.model_rpcs: List[ModelRpcClient] = [] for rank_id in range(self.world_size): rpc_model = await start_model_process(port=self.model_rpc_ports[rank_id], world_size=self.world_size) self.model_rpcs.append(rpc_model) init_model_ret = [] for rank_id in range(self.world_size): # async init model process init_model_ret.append( self.model_rpcs[rank_id].init_model( rank_id, self.world_size, self.model_weightdir, self.adapter_dirs, self.input_params.max_total_token_num, self.load_way, self.mode, input_params=self.input_params, prefetch_stream=self.prefetch_stream, )) await asyncio.gather(*init_model_ret) return async def profile_prefill(self): res = [] for rank_id in range(self.world_size): # async init model process res.append( self.model_rpcs[rank_id].profile_prefill()) results = await asyncio.gather(*res) self.alpha_model = AlphaModel(results[0]) self.beta_model = BetaModel(results[0]) # check if the path exists else create it cache_dir = os.path.expanduser("~/.cache/slora") if not os.path.exists(cache_dir): os.makedirs(cache_dir) with open(cache_dir+"/profile_results.pkl", "wb") as f: pickle.dump(results[0], f) return def add_req( self, adapter_dir: str, prompt_ids: List[int], sampling_params: SamplingParams, request_id: str ): req = Req(adapter_dir, request_id, prompt_ids, sampling_params) self.req_queue.append(req) self.send_to_detokenization.send_pyobj(req.to_req_detokenization_state()) return async def abort(self, request_id): if self.running_batch is not None: for req in self.running_batch.reqs: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True for req in self.req_queue.waiting_req_list: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True return async def loop_for_fwd(self,): counter_count = 0 while True: await self._step() counter_count += 1 if self.running_batch is not None: if counter_count % 50 == 0: print("current batch size:", len(self.running_batch.reqs), "token used ratio:", self.running_batch.calcu_used_tokens() / self.input_params.max_total_token_num) pass self.stats_tool.print_stats() if self.running_batch is None: await asyncio.sleep(0.01) # 10ms async def 
_step(self): """ 事件处理循环 """ # 删除所有已经 finished 的 req if self.running_batch is None: new_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) class RouterManager: def __init__(self, weightdir, adapter_dirs, load_way, world_size, eos_id, router_port, detokenization_port, model_rpc_ports, input_params, mode=[], log_stats=True, log_stats_interval=10): self.model_weightdir = weightdir self.adapter_dirs = adapter_dirs self.world_size = world_size self.load_way = load_way self.mode = mode self.input_params = input_params if self.input_params.prefetch: self.prefetch_stream = torch.cuda.Stream() else: self.prefetch_stream = None # get adapter rank self.lora_ranks = {} for lora_dir in adapter_dirs: config, _ = get_lora_config(lora_dir, input_params.dummy) self.lora_ranks[lora_dir] = config["r"] self.lora_ranks[None] = 0 if input_params.scheduler == "pets": self.req_queue = PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": self.req_queue = PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None: self.req_queue = ClusterReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, input_params.batch_num_adapters) elif input_params.enable_abort: self.req_queue = AbortReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) else: self.req_queue = ReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) self.running_batch: Batch = None self.eos_id = eos_id self.has_wait_tokens = 0 self.max_wait_tokens = 10 context = zmq.asyncio.Context(2) self.recv_from_httpserver = context.socket(zmq.PULL) self.recv_from_httpserver.bind(f"tcp://127.0.0.1:{router_port}") self.send_to_detokenization = context.socket(zmq.PUSH) self.send_to_detokenization.connect(f"tcp://127.0.0.1:{detokenization_port}") self.model_rpc_ports = model_rpc_ports self.stats_tool = Stats(log_stats, log_stats_interval) async def wait_to_model_ready(self): self.model_rpcs: List[ModelRpcClient] = [] for rank_id in range(self.world_size): rpc_model = await start_model_process(port=self.model_rpc_ports[rank_id], world_size=self.world_size) self.model_rpcs.append(rpc_model) init_model_ret = [] for rank_id in range(self.world_size): # async init model process init_model_ret.append( self.model_rpcs[rank_id].init_model( rank_id, self.world_size, self.model_weightdir, self.adapter_dirs, self.input_params.max_total_token_num, self.load_way, self.mode, input_params=self.input_params, prefetch_stream=self.prefetch_stream, )) await asyncio.gather(*init_model_ret) return async def profile_prefill(self): res = [] for rank_id in range(self.world_size): # async init model process res.append( self.model_rpcs[rank_id].profile_prefill()) results = await asyncio.gather(*res) self.alpha_model = AlphaModel(results[0]) self.beta_model = BetaModel(results[0]) # check if the path exists else create it cache_dir = os.path.expanduser("~/.cache/slora") if not os.path.exists(cache_dir): os.makedirs(cache_dir) with open(cache_dir+"/profile_results.pkl", "wb") as f: pickle.dump(results[0], f) return def add_req( self, adapter_dir: str, prompt_ids: List[int], sampling_params: SamplingParams, request_id: str ): req = Req(adapter_dir, request_id, prompt_ids, sampling_params) self.req_queue.append(req) self.send_to_detokenization.send_pyobj(req.to_req_detokenization_state()) return async def abort(self, 
request_id): if self.running_batch is not None: for req in self.running_batch.reqs: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True for req in self.req_queue.waiting_req_list: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True return async def loop_for_fwd(self,): counter_count = 0 while True: await self._step() counter_count += 1 if self.running_batch is not None: if counter_count % 50 == 0: print("current batch size:", len(self.running_batch.reqs), "token used ratio:", self.running_batch.calcu_used_tokens() / self.input_params.max_total_token_num) pass self.stats_tool.print_stats() if self.running_batch is None: await asyncio.sleep(0.01) # 10ms async def _step(self): """ Event handling loop """ # remove all reqs that have already finished if self.running_batch is None: new_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0:
self.send_to_detokenization.send_pyobj(BatchAbortReq(self.req_queue.abort_req_list))
3
2023-10-16 02:39:47+00:00
16k
MobileLLM/AutoDroid
droidbot/input_manager.py
[ { "identifier": "EventLog", "path": "droidbot/input_event.py", "snippet": "class EventLog(object):\n \"\"\"\n save an event to local file system\n \"\"\"\n\n def __init__(self, device, app, event, profiling_method=None, tag=None):\n self.device = device\n self.app = app\n self.event = event\n if tag is None:\n from datetime import datetime\n tag = datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")\n self.tag = tag\n\n self.from_state = None\n self.to_state = None\n self.event_str = None\n\n self.profiling_method = profiling_method\n self.trace_remote_file = \"/data/local/tmp/event.trace\"\n self.is_profiling = False\n self.profiling_pid = -1\n self.sampling = None\n # sampling feature was added in Android 5.0 (API level 21)\n if profiling_method is not None and \\\n str(profiling_method) != \"full\" and \\\n self.device.get_sdk_version() >= 21:\n self.sampling = int(profiling_method)\n\n def to_dict(self):\n return {\n \"tag\": self.tag,\n \"event\": self.event.to_dict(),\n \"start_state\": self.from_state.state_str,\n \"stop_state\": self.to_state.state_str,\n \"event_str\": self.event_str\n }\n\n def save2dir(self, output_dir=None):\n # Save event\n if output_dir is None:\n if self.device.output_dir is None:\n return\n else:\n output_dir = os.path.join(self.device.output_dir, \"events\")\n try:\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n event_json_file_path = \"%s/event_%s.json\" % (output_dir, self.tag)\n event_json_file = open(event_json_file_path, \"w\")\n json.dump(self.to_dict(), event_json_file, indent=2)\n event_json_file.close()\n except Exception as e:\n self.device.logger.warning(\"Saving event to dir failed.\")\n self.device.logger.warning(e)\n\n def save_views(self, output_dir=None):\n # Save views\n views = self.event.get_views()\n if views:\n for view_dict in views:\n self.from_state.save_view_img(view_dict=view_dict, output_dir=output_dir)\n\n def is_start_event(self):\n if isinstance(self.event, IntentEvent):\n intent_cmd = self.event.intent\n if \"start\" in intent_cmd and self.app.get_package_name() in intent_cmd:\n return True\n return False\n\n def start(self):\n \"\"\"\n start sending event\n \"\"\"\n self.from_state = self.device.get_current_state()\n self.start_profiling()\n self.event_str = self.event.get_event_str(self.from_state)\n print(\"Action: %s\" % self.event_str)\n self.device.send_event(self.event)\n\n def start_profiling(self):\n \"\"\"\n start profiling the current event\n @return:\n \"\"\"\n if self.profiling_method is None:\n return\n if self.is_profiling:\n return\n pid = self.device.get_app_pid(self.app)\n if pid is None:\n if self.is_start_event():\n start_intent = self.app.get_start_with_profiling_intent(self.trace_remote_file, self.sampling)\n self.event.intent = start_intent.get_cmd()\n self.is_profiling = True\n return\n if self.sampling is not None:\n self.device.adb.shell(\n [\"am\", \"profile\", \"start\", \"--sampling\", str(self.sampling), str(pid), self.trace_remote_file])\n else:\n self.device.adb.shell([\"am\", \"profile\", \"start\", str(pid), self.trace_remote_file])\n self.is_profiling = True\n self.profiling_pid = pid\n\n def stop(self):\n \"\"\"\n finish sending event\n \"\"\"\n self.stop_profiling()\n self.to_state = self.device.get_current_state()\n self.save2dir()\n self.save_views()\n\n def stop_profiling(self, output_dir=None):\n if self.profiling_method is None:\n return\n if not self.is_profiling:\n return\n try:\n if self.profiling_pid == -1:\n pid = self.device.get_app_pid(self.app)\n if pid is None:\n 
return\n self.profiling_pid = pid\n\n self.device.adb.shell([\"am\", \"profile\", \"stop\", str(self.profiling_pid)])\n if self.sampling is None:\n time.sleep(3) # guess this time can vary between machines\n\n if output_dir is None:\n if self.device.output_dir is None:\n return\n else:\n output_dir = os.path.join(self.device.output_dir, \"events\")\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n event_trace_local_path = \"%s/event_trace_%s.trace\" % (output_dir, self.tag)\n self.device.pull_file(self.trace_remote_file, event_trace_local_path)\n\n except Exception as e:\n self.device.logger.warning(\"profiling event failed\")\n self.device.logger.warning(e)" }, { "identifier": "UtgBasedInputPolicy", "path": "droidbot/input_policy.py", "snippet": "class UtgBasedInputPolicy(InputPolicy):\n \"\"\"\n state-based input policy\n \"\"\"\n\n def __init__(self, device, app, random_input):\n super(UtgBasedInputPolicy, self).__init__(device, app)\n self.random_input = random_input\n self.script = None\n self.master = None\n self.script_events = []\n self.last_event = None\n self.last_state = None\n self.current_state = None\n self.utg = UTG(device=device, app=app, random_input=random_input)\n self.script_event_idx = 0\n if self.device.humanoid is not None:\n self.humanoid_view_trees = []\n self.humanoid_events = []\n\n def generate_event(self, input_manager):\n \"\"\"\n generate an event\n @return:\n \"\"\"\n\n # Get current device state\n self.current_state = self.device.get_current_state()\n if self.current_state is None:\n import time\n time.sleep(5)\n return KeyEvent(name=\"BACK\")\n\n self.__update_utg()\n\n # update last view trees for humanoid\n if self.device.humanoid is not None:\n self.humanoid_view_trees = self.humanoid_view_trees + [self.current_state.view_tree]\n if len(self.humanoid_view_trees) > 4:\n self.humanoid_view_trees = self.humanoid_view_trees[1:]\n\n event = None\n\n # if the previous operation is not finished, continue\n if len(self.script_events) > self.script_event_idx:\n event = self.script_events[self.script_event_idx].get_transformed_event(self)\n self.script_event_idx += 1\n\n # First try matching a state defined in the script\n if event is None and self.script is not None:\n operation = self.script.get_operation_based_on_state(self.current_state)\n if operation is not None:\n self.script_events = operation.events\n # restart script\n event = self.script_events[0].get_transformed_event(self)\n self.script_event_idx = 1\n\n if event is None:\n old_state, event = self.generate_event_based_on_utg(input_manager)\n import time\n time.sleep(3)\n # update last events for humanoid\n if self.device.humanoid is not None:\n self.humanoid_events = self.humanoid_events + [event]\n if len(self.humanoid_events) > 3:\n self.humanoid_events = self.humanoid_events[1:]\n\n self.last_state = self.current_state if old_state is None else old_state\n self.last_event = event\n return event\n\n def __update_utg(self):\n self.utg.add_transition(self.last_event, self.last_state, self.current_state)\n\n @abstractmethod\n def generate_event_based_on_utg(self, input_manager):\n \"\"\"\n generate an event based on UTG\n :return: InputEvent\n \"\"\"\n pass" }, { "identifier": "UtgNaiveSearchPolicy", "path": "droidbot/input_policy.py", "snippet": "class UtgNaiveSearchPolicy(UtgBasedInputPolicy):\n \"\"\"\n depth-first strategy to explore UFG (old)\n \"\"\"\n\n def __init__(self, device, app, random_input, search_method):\n super(UtgNaiveSearchPolicy, self).__init__(device, app, 
random_input)\n self.logger = logging.getLogger(self.__class__.__name__)\n\n self.explored_views = set()\n self.state_transitions = set()\n self.search_method = search_method\n\n self.last_event_flag = \"\"\n self.last_event_str = None\n self.last_state = None\n\n self.preferred_buttons = [\"yes\", \"ok\", \"activate\", \"detail\", \"more\", \"access\",\n \"allow\", \"check\", \"agree\", \"try\", \"go\", \"next\"]\n\n def generate_event_based_on_utg(self):\n \"\"\"\n generate an event based on current device state\n note: ensure these fields are properly maintained in each transaction:\n last_event_flag, last_touched_view, last_state, exploited_views, state_transitions\n @return: InputEvent\n \"\"\"\n self.save_state_transition(self.last_event_str, self.last_state, self.current_state)\n\n if self.device.is_foreground(self.app):\n # the app is in foreground, clear last_event_flag\n self.last_event_flag = EVENT_FLAG_STARTED\n else:\n number_of_starts = self.last_event_flag.count(EVENT_FLAG_START_APP)\n # If we have tried too many times but the app is still not started, stop DroidBot\n if number_of_starts > MAX_NUM_RESTARTS:\n raise InputInterruptedException(\"The app cannot be started.\")\n\n # if app is not started, try start it\n if self.last_event_flag.endswith(EVENT_FLAG_START_APP):\n # It seems the app stuck at some state, and cannot be started\n # just pass to let viewclient deal with this case\n self.logger.info(\"The app had been restarted %d times.\", number_of_starts)\n self.logger.info(\"Trying to restart app...\")\n pass\n else:\n start_app_intent = self.app.get_start_intent()\n\n self.last_event_flag += EVENT_FLAG_START_APP\n self.last_event_str = EVENT_FLAG_START_APP\n return IntentEvent(start_app_intent)\n\n # select a view to click\n view_to_touch = self.select_a_view(self.current_state)\n\n # if no view can be selected, restart the app\n if view_to_touch is None:\n stop_app_intent = self.app.get_stop_intent()\n self.last_event_flag += EVENT_FLAG_STOP_APP\n self.last_event_str = EVENT_FLAG_STOP_APP\n return IntentEvent(stop_app_intent)\n\n view_to_touch_str = view_to_touch['view_str']\n if view_to_touch_str.startswith('BACK'):\n result = KeyEvent('BACK')\n else:\n result = TouchEvent(view=view_to_touch)\n\n self.last_event_flag += EVENT_FLAG_TOUCH\n self.last_event_str = view_to_touch_str\n self.save_explored_view(self.current_state, self.last_event_str)\n return result\n\n def select_a_view(self, state):\n \"\"\"\n select a view in the view list of given state, let droidbot touch it\n @param state: DeviceState\n @return:\n \"\"\"\n views = []\n for view in state.views:\n if view['enabled'] and len(view['children']) == 0:\n views.append(view)\n\n if self.random_input:\n random.shuffle(views)\n\n # add a \"BACK\" view, consider go back first/last according to search policy\n mock_view_back = {'view_str': 'BACK_%s' % state.foreground_activity,\n 'text': 'BACK_%s' % state.foreground_activity}\n if self.search_method == POLICY_NAIVE_DFS:\n views.append(mock_view_back)\n elif self.search_method == POLICY_NAIVE_BFS:\n views.insert(0, mock_view_back)\n\n # first try to find a preferable view\n for view in views:\n view_text = view['text'] if view['text'] is not None else ''\n view_text = view_text.lower().strip()\n if view_text in self.preferred_buttons \\\n and (state.foreground_activity, view['view_str']) not in self.explored_views:\n self.logger.info(\"selected an preferred view: %s\" % view['view_str'])\n return view\n\n # try to find a un-clicked view\n for view in views:\n if 
(state.foreground_activity, view['view_str']) not in self.explored_views:\n self.logger.info(\"selected an un-clicked view: %s\" % view['view_str'])\n return view\n\n # if all enabled views have been clicked, try jump to another activity by clicking one of state transitions\n if self.random_input:\n random.shuffle(views)\n transition_views = {transition[0] for transition in self.state_transitions}\n for view in views:\n if view['view_str'] in transition_views:\n self.logger.info(\"selected a transition view: %s\" % view['view_str'])\n return view\n\n # no window transition found, just return a random view\n # view = views[0]\n # self.logger.info(\"selected a random view: %s\" % view['view_str'])\n # return view\n\n # DroidBot stuck on current state, return None\n self.logger.info(\"no view could be selected in state: %s\" % state.tag)\n return None\n\n def save_state_transition(self, event_str, old_state, new_state):\n \"\"\"\n save the state transition\n @param event_str: str, representing the event cause the transition\n @param old_state: DeviceState\n @param new_state: DeviceState\n @return:\n \"\"\"\n if event_str is None or old_state is None or new_state is None:\n return\n if new_state.is_different_from(old_state):\n self.state_transitions.add((event_str, old_state.tag, new_state.tag))\n\n def save_explored_view(self, state, view_str):\n \"\"\"\n save the explored view\n @param state: DeviceState, where the view located\n @param view_str: str, representing a view\n @return:\n \"\"\"\n if not state:\n return\n state_activity = state.foreground_activity\n self.explored_views.add((state_activity, view_str))" }, { "identifier": "UtgGreedySearchPolicy", "path": "droidbot/input_policy.py", "snippet": "class UtgGreedySearchPolicy(UtgBasedInputPolicy):\n \"\"\"\n DFS/BFS (according to search_method) strategy to explore UFG (new)\n \"\"\"\n\n def __init__(self, device, app, random_input, search_method):\n super(UtgGreedySearchPolicy, self).__init__(device, app, random_input)\n self.logger = logging.getLogger(self.__class__.__name__)\n self.search_method = search_method\n\n self.preferred_buttons = [\"yes\", \"ok\", \"activate\", \"detail\", \"more\", \"access\",\n \"allow\", \"check\", \"agree\", \"try\", \"go\", \"next\"]\n\n self.__nav_target = None\n self.__nav_num_steps = -1\n self.__num_restarts = 0\n self.__num_steps_outside = 0\n self.__event_trace = \"\"\n self.__missed_states = set()\n self.__random_explore = False\n\n def generate_event_based_on_utg(self, input_manager):\n \"\"\"\n generate an event based on current UTG\n @return: InputEvent\n \"\"\"\n current_state = self.current_state\n self.logger.info(\"Current state: %s\" % current_state.state_str)\n if current_state.state_str in self.__missed_states:\n self.__missed_states.remove(current_state.state_str)\n\n if current_state.get_app_activity_depth(self.app) < 0:\n # If the app is not in the activity stack\n start_app_intent = self.app.get_start_intent()\n\n # It seems the app stucks at some state, has been\n # 1) force stopped (START, STOP)\n # just start the app again by increasing self.__num_restarts\n # 2) started at least once and cannot be started (START)\n # pass to let viewclient deal with this case\n # 3) nothing\n # a normal start. 
clear self.__num_restarts.\n\n if self.__event_trace.endswith(EVENT_FLAG_START_APP + EVENT_FLAG_STOP_APP) \\\n or self.__event_trace.endswith(EVENT_FLAG_START_APP):\n self.__num_restarts += 1\n self.logger.info(\"The app had been restarted %d times.\", self.__num_restarts)\n else:\n self.__num_restarts = 0\n\n # pass (START) through\n if not self.__event_trace.endswith(EVENT_FLAG_START_APP):\n if self.__num_restarts > MAX_NUM_RESTARTS:\n # If the app had been restarted too many times, enter random mode\n msg = \"The app had been restarted too many times. Entering random mode.\"\n self.logger.info(msg)\n self.__random_explore = True\n else:\n # Start the app\n self.__event_trace += EVENT_FLAG_START_APP\n self.logger.info(\"Trying to start the app...\")\n return IntentEvent(intent=start_app_intent)\n\n elif current_state.get_app_activity_depth(self.app) > 0:\n # If the app is in activity stack but is not in foreground\n self.__num_steps_outside += 1\n\n if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE:\n # If the app has not been in foreground for too long, try to go back\n if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE_KILL:\n stop_app_intent = self.app.get_stop_intent()\n go_back_event = IntentEvent(stop_app_intent)\n else:\n go_back_event = KeyEvent(name=\"BACK\")\n self.__event_trace += EVENT_FLAG_NAVIGATE\n self.logger.info(\"Going back to the app...\")\n return go_back_event\n else:\n # If the app is in foreground\n self.__num_steps_outside = 0\n\n # Get all possible input events\n possible_events = current_state.get_possible_input()\n\n if self.random_input:\n random.shuffle(possible_events)\n\n if self.search_method == POLICY_GREEDY_DFS:\n possible_events.append(KeyEvent(name=\"BACK\"))\n elif self.search_method == POLICY_GREEDY_BFS:\n possible_events.insert(0, KeyEvent(name=\"BACK\"))\n\n # get humanoid result, use the result to sort possible events\n # including back events\n if self.device.humanoid is not None:\n possible_events = self.__sort_inputs_by_humanoid(possible_events)\n\n # If there is an unexplored event, try the event first\n for input_event in possible_events:\n if not self.utg.is_event_explored(event=input_event, state=current_state):\n self.logger.info(\"Trying an unexplored event.\")\n self.__event_trace += EVENT_FLAG_EXPLORE\n return input_event\n\n target_state = self.__get_nav_target(current_state)\n if target_state:\n navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=target_state)\n if navigation_steps and len(navigation_steps) > 0:\n self.logger.info(\"Navigating to %s, %d steps left.\" % (target_state.state_str, len(navigation_steps)))\n self.__event_trace += EVENT_FLAG_NAVIGATE\n return navigation_steps[0][1]\n\n if self.__random_explore:\n self.logger.info(\"Trying random event.\")\n random.shuffle(possible_events)\n return possible_events[0]\n\n # If couldn't find a exploration target, stop the app\n stop_app_intent = self.app.get_stop_intent()\n self.logger.info(\"Cannot find an exploration target. 
Trying to restart app...\")\n self.__event_trace += EVENT_FLAG_STOP_APP\n return IntentEvent(intent=stop_app_intent)\n\n def __sort_inputs_by_humanoid(self, possible_events):\n if sys.version.startswith(\"3\"):\n from xmlrpc.client import ServerProxy\n else:\n from xmlrpclib import ServerProxy\n proxy = ServerProxy(\"http://%s/\" % self.device.humanoid)\n request_json = {\n \"history_view_trees\": self.humanoid_view_trees,\n \"history_events\": [x.__dict__ for x in self.humanoid_events],\n \"possible_events\": [x.__dict__ for x in possible_events],\n \"screen_res\": [self.device.display_info[\"width\"],\n self.device.display_info[\"height\"]]\n }\n result = json.loads(proxy.predict(json.dumps(request_json)))\n new_idx = result[\"indices\"]\n text = result[\"text\"]\n new_events = []\n\n # get rid of infinite recursive by randomizing first event\n if not self.utg.is_state_reached(self.current_state):\n new_first = random.randint(0, len(new_idx) - 1)\n new_idx[0], new_idx[new_first] = new_idx[new_first], new_idx[0]\n\n for idx in new_idx:\n if isinstance(possible_events[idx], SetTextEvent):\n possible_events[idx].text = text\n new_events.append(possible_events[idx])\n return new_events\n\n def __get_nav_target(self, current_state):\n # If last event is a navigation event\n if self.__nav_target and self.__event_trace.endswith(EVENT_FLAG_NAVIGATE):\n navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target)\n if navigation_steps and 0 < len(navigation_steps) <= self.__nav_num_steps:\n # If last navigation was successful, use current nav target\n self.__nav_num_steps = len(navigation_steps)\n return self.__nav_target\n else:\n # If last navigation was failed, add nav target to missing states\n self.__missed_states.add(self.__nav_target.state_str)\n\n reachable_states = self.utg.get_reachable_states(current_state)\n if self.random_input:\n random.shuffle(reachable_states)\n\n for state in reachable_states:\n # Only consider foreground states\n if state.get_app_activity_depth(self.app) != 0:\n continue\n # Do not consider missed states\n if state.state_str in self.__missed_states:\n continue\n # Do not consider explored states\n if self.utg.is_state_explored(state):\n continue\n self.__nav_target = state\n navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target)\n if len(navigation_steps) > 0:\n self.__nav_num_steps = len(navigation_steps)\n return state\n\n self.__nav_target = None\n self.__nav_num_steps = -1\n return None" }, { "identifier": "UtgReplayPolicy", "path": "droidbot/input_policy.py", "snippet": "class UtgReplayPolicy(InputPolicy):\n \"\"\"\n Replay DroidBot output generated by UTG policy\n \"\"\"\n\n def __init__(self, device, app, replay_output):\n super(UtgReplayPolicy, self).__init__(device, app)\n self.logger = logging.getLogger(self.__class__.__name__)\n self.replay_output = replay_output\n\n import os\n event_dir = os.path.join(replay_output, \"events\")\n self.event_paths = sorted([os.path.join(event_dir, x) for x in\n next(os.walk(event_dir))[2]\n if x.endswith(\".json\")])\n # skip HOME and start app intent\n self.device = device\n self.app = app\n self.event_idx = 2\n self.num_replay_tries = 0\n self.utg = UTG(device=device, app=app, random_input=None)\n self.last_event = None\n self.last_state = None\n self.current_state = None\n\n def generate_event(self):\n \"\"\"\n generate an event based on replay_output\n @return: InputEvent\n \"\"\"\n import time\n while self.event_idx < 
len(self.event_paths) and \\\n self.num_replay_tries < MAX_REPLY_TRIES:\n self.num_replay_tries += 1\n current_state = self.device.get_current_state()\n if current_state is None:\n time.sleep(5)\n self.num_replay_tries = 0\n return KeyEvent(name=\"BACK\")\n\n curr_event_idx = self.event_idx\n self.__update_utg()\n while curr_event_idx < len(self.event_paths):\n event_path = self.event_paths[curr_event_idx]\n with open(event_path, \"r\") as f:\n curr_event_idx += 1\n\n try:\n event_dict = json.load(f)\n except Exception as e:\n self.logger.info(\"Loading %s failed\" % event_path)\n continue\n\n if event_dict[\"start_state\"] != current_state.state_str:\n continue\n if not self.device.is_foreground(self.app):\n # if current app is in background, bring it to foreground\n component = self.app.get_package_name()\n if self.app.get_main_activity():\n component += \"/%s\" % self.app.get_main_activity()\n return IntentEvent(Intent(suffix=component))\n \n self.logger.info(\"Replaying %s\" % event_path)\n self.event_idx = curr_event_idx\n self.num_replay_tries = 0\n # return InputEvent.from_dict(event_dict[\"event\"])\n event = InputEvent.from_dict(event_dict[\"event\"])\n self.last_state = self.current_state\n self.last_event = event\n return event \n\n time.sleep(5)\n\n # raise InputInterruptedException(\"No more record can be replayed.\")\n def __update_utg(self):\n self.utg.add_transition(self.last_event, self.last_state, self.current_state)" }, { "identifier": "ManualPolicy", "path": "droidbot/input_policy.py", "snippet": "class ManualPolicy(UtgBasedInputPolicy):\n \"\"\"\n manually explore UFG\n \"\"\"\n\n def __init__(self, device, app):\n super(ManualPolicy, self).__init__(device, app, False)\n self.logger = logging.getLogger(self.__class__.__name__)\n\n self.__first_event = True\n\n def generate_event_based_on_utg(self):\n \"\"\"\n generate an event based on current UTG\n @return: InputEvent\n \"\"\"\n if self.__first_event:\n self.__first_event = False\n self.logger.info(\"Trying to start the app...\")\n start_app_intent = self.app.get_start_intent()\n return IntentEvent(intent=start_app_intent)\n else:\n return ManualEvent()" }, { "identifier": "TaskPolicy", "path": "droidbot/input_policy.py", "snippet": "class TaskPolicy(UtgBasedInputPolicy):\n\n def __init__(self, device, app, random_input, task, use_memory=True, debug_mode=False):\n super(TaskPolicy, self).__init__(device, app, random_input)\n self.logger = logging.getLogger(self.__class__.__name__)\n self.task = task\n\n self.__nav_target = None\n self.__nav_num_steps = -1\n self.__num_restarts = 0\n self.__num_steps_outside = 0\n self.__event_trace = \"\"\n self.__missed_states = set()\n self.__random_explore = random_input\n self.__action_history = []\n self.__thought_history = []\n self.use_memory = use_memory\n # if use_memory:\n # self.memory = Memory(app_name=self.app.app_name, app_output_path=self.device.output_dir)\n if self.use_memory:\n self.similar_ele_path, self.similar_ele_function, self.similar_ele_statement = self.get_most_similar_element()\n if not self.similar_ele_function:\n self.use_memory = False\n print('=============\\nWarning: Did not find the memory of this app, the app memory is disabled\\n=============')\n else:\n print(f'============\\nFound element: {self.similar_ele_statement}\\nPath: {self.similar_ele_path}\\nFunction: {self.similar_ele_function}\\n============')\n self.state_ele_memory = {} # memorize some important states that contain elements of insight\n\n def get_most_similar_element(self):\n from 
InstructorEmbedding import INSTRUCTOR\n from sklearn.metrics.pairwise import cosine_similarity\n import numpy as np\n model = INSTRUCTOR('hkunlp/instructor-xl')\n task_embedding = model.encode('task: ' + self.task).reshape(1, -1)\n\n with open('memory/node_filtered_elements.json') as file:\n ele_statements = json.load(file)\n with open('memory/element_description.json') as file:\n ele_functions = json.load(file)\n with open('memory/embedded_elements_desc.json') as file:\n embeddings = json.load(file)\n app_name = self.device.output_dir.split('/')[-1]\n if app_name not in embeddings.keys():\n return None, None, None\n app_embeddings = embeddings[app_name]\n\n # similarities = {}\n max_similarity, similar_ele_idx = -9999, -9999\n for state_str, elements in app_embeddings.items():\n # if the target element is in the first ui, no onclick is needed\n # if ele_statements[app_name][state_str]['path'] == []:\n # continue\n # similarities[state_str] = []\n for idx, ele in enumerate(elements):\n if ele:\n npele = np.array(ele).reshape(1, -1)\n similarity = cosine_similarity(task_embedding, npele)[0][0]\n else:\n similarity = -9999\n # similarities[state_str].append(similarity)\n if similarity > max_similarity:\n max_similarity = similarity\n similar_ele_idx = idx\n similar_state_str = state_str\n\n similar_ele = ele_statements[app_name][similar_state_str]['elements'][similar_ele_idx]\n similar_ele_path = ele_statements[app_name][similar_state_str]['path']\n similar_ele_desc = ele_functions[app_name][similar_state_str][similar_ele_idx]\n del model\n return similar_ele_path, similar_ele_desc, similar_ele\n \n def _scroll_to_top(self, scroller, all_views_for_mark, old_state=None):\n prefix_scroll_event = []\n if old_state is None:\n old_state = self.current_state \n for _ in range(MAX_SCROLL_NUM): # first scroll up to the top\n self.device.send_event(ScrollEvent(view=scroller, direction=\"UP\"))\n scrolled_state = self.device.get_current_state()\n self.utg.add_transition(ScrollEvent(view=scroller, direction=\"UP\"), old_state, scrolled_state)\n old_state = scrolled_state\n state_prompt, scrolled_candidate_actions, scrolled_views, _ = scrolled_state.get_described_actions()\n scrolled_new_views = [] # judge whether there is a new view after scrolling\n for scrolled_view in scrolled_views:\n if scrolled_view not in all_views_for_mark:\n scrolled_new_views.append(scrolled_view)\n all_views_for_mark.append(scrolled_view)\n if len(scrolled_new_views) == 0:\n break\n\n prefix_scroll_event.append(ScrollEvent(view=scroller, direction=\"UP\"))\n return prefix_scroll_event\n\n\n def generate_event_based_on_utg(self, input_manager):\n \"\"\"\n generate an event based on current UTG\n @return: InputEvent\n \"\"\"\n current_state = self.current_state\n self.logger.info(\"Current state: %s\" % current_state.state_str)\n if current_state.state_str in self.__missed_states:\n self.__missed_states.remove(current_state.state_str)\n\n if current_state.get_app_activity_depth(self.app) < 0:\n # If the app is not in the activity stack\n start_app_intent = self.app.get_start_intent()\n\n # It seems the app stucks at some state, has been\n # 1) force stopped (START, STOP)\n # just start the app again by increasing self.__num_restarts\n # 2) started at least once and cannot be started (START)\n # pass to let viewclient deal with this case\n # 3) nothing\n # a normal start. 
clear self.__num_restarts.\n\n if self.__event_trace.endswith(EVENT_FLAG_START_APP + EVENT_FLAG_STOP_APP) \\\n or self.__event_trace.endswith(EVENT_FLAG_START_APP):\n self.__num_restarts += 1\n self.logger.info(\"The app had been restarted %d times.\", self.__num_restarts)\n else:\n self.__num_restarts = 0\n\n # pass (START) through\n if not self.__event_trace.endswith(EVENT_FLAG_START_APP):\n if self.__num_restarts > MAX_NUM_RESTARTS:\n # If the app had been restarted too many times, enter random mode\n msg = \"The app had been restarted too many times. Entering random mode.\"\n self.logger.info(msg)\n self.__random_explore = True\n else:\n # Start the app\n self.__event_trace += EVENT_FLAG_START_APP\n self.logger.info(\"Trying to start the app...\")\n # self.__action_history = [f'- start the app {self.app.app_name}']\n self.__action_history = [f'- launchApp {self.app.app_name}']\n self.__thought_history = [f'launch the app {self.app.app_name} to finish the task {self.task}']\n return None, IntentEvent(intent=start_app_intent)\n\n elif current_state.get_app_activity_depth(self.app) > 0:\n # If the app is in activity stack but is not in foreground\n self.__num_steps_outside += 1\n\n if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE:\n # If the app has not been in foreground for too long, try to go back\n if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE_KILL:\n stop_app_intent = self.app.get_stop_intent()\n go_back_event = IntentEvent(stop_app_intent)\n else:\n go_back_event = KeyEvent(name=\"BACK\")\n self.__event_trace += EVENT_FLAG_NAVIGATE\n self.logger.info(\"Going back to the app...\")\n self.__action_history.append('- go back')\n self.__thought_history.append('the app has not been in foreground for too long, try to go back')\n return None, go_back_event\n else:\n # If the app is in foreground\n self.__num_steps_outside = 0\n \n \n scrollable_views = current_state.get_scrollable_views()#self._get_scrollable_views(current_state)\n \n if len(scrollable_views) > 0:\n '''\n if there is at least one scroller in the screen, we scroll each scroller many times until all the screens after scrolling have been recorded, you do not need to read\n '''\n # print(scrollable_views)\n\n actions_dict = {}\n whole_state_views, whole_state_actions, whole_state_strs = [], [], []\n\n # state_strs = [current_state.state_str]\n state_prompt, current_candidate_actions, current_views, _ = current_state.get_described_actions()\n all_views_for_mark = copy.deepcopy(current_views) # just for judging whether the screen has been scrolled up to the top\n\n for scrollerid in range(len(scrollable_views)):\n scroller = scrollable_views[scrollerid]\n # prefix_scroll_event = []\n actions_dict[scrollerid] = []\n\n prefix_scroll_event = self._scroll_to_top(scroller, all_views_for_mark)\n \n # after scrolling to the top, update the current_state\n top_state = self.device.get_current_state()\n state_prompt, top_candidate_actions, top_views, _ = top_state.get_described_actions()\n all_views_without_id, all_actions = top_views, top_candidate_actions\n\n too_few_item_time = 0\n\n for _ in range(MAX_SCROLL_NUM): # then scroll down to the bottom\n whole_state_strs.append(top_state.state_str) # record the states from the top to the bottom\n self.device.send_event(ScrollEvent(view=scroller, direction=\"DOWN\"))\n scrolled_state = self.device.get_current_state()\n state_prompt, scrolled_candidate_actions, scrolled_views, _ = scrolled_state.get_described_actions()\n \n scrolled_new_views = []\n for scrolled_view_id in 
range(len(scrolled_views)):\n scrolled_view = scrolled_views[scrolled_view_id]\n if scrolled_view not in all_views_without_id:\n scrolled_new_views.append(scrolled_view)\n all_views_without_id.append(scrolled_view)\n all_actions.append(prefix_scroll_event + [ScrollEvent(view=scroller, direction=\"DOWN\"), scrolled_candidate_actions[scrolled_view_id]])\n # print('found new views:', scrolled_new_views)\n if len(scrolled_new_views) == 0:\n break\n \n prefix_scroll_event.append(ScrollEvent(view=scroller, direction=\"DOWN\"))\n\n if len(scrolled_new_views) < 2:\n too_few_item_time += 1\n if too_few_item_time >= 2:\n break\n\n self.utg.add_transition(ScrollEvent(view=scroller, direction=\"DOWN\"), top_state, scrolled_state)\n top_state = scrolled_state\n \n # filter out the views that have been added to the whole_state by scrolling other scrollers\n for all_view_id in range(len(all_views_without_id)):\n view = all_views_without_id[all_view_id]\n if view not in whole_state_views:\n whole_state_views.append(view)\n whole_state_actions.append(all_actions[all_view_id])\n \n all_views_for_mark = []\n _ = self._scroll_to_top(scroller, all_views_for_mark, top_state)\n # print(whole_state_views)\n action, candidate_actions, target_view, thought = self._get_action_from_views_actions(\n views=whole_state_views, candidate_actions=whole_state_actions, state_strs=whole_state_strs, action_history=self.__action_history, thought_history=self.__thought_history)\n\n if isinstance(action, list): # the screen has to be scrolled first\n last_state = None\n for eventid in range(len(action) - 1):\n self.device.send_event(action[eventid])\n last_state = self.device.get_current_state()\n # self.__action_history.append(current_state.get_action_desc(action[eventid]))\n self.__action_history.append(current_state.get_action_descv2(action[-1], target_view))\n self.__thought_history.append(thought)\n return last_state, action[-1]\n '''\n end for dealing with scrollers\n '''\n else:\n action, candidate_actions, target_view, thought = self._get_action_from_views_actions(\n current_state=current_state, action_history=self.__action_history, thought_history=self.__thought_history, state_strs=current_state.state_str)\n \n if action == FINISHED:\n return None, FINISHED\n if action is not None:\n self.__action_history.append(current_state.get_action_descv2(action, target_view))\n self.__thought_history.append(thought)\n return None, action\n\n if self.__random_explore:\n self.logger.info(\"Trying random event.\")\n action = random.choice(candidate_actions)\n self.__action_history.append(current_state.get_action_descv2(action, target_view))\n self.__thought_history.append('random trying')\n return None, action\n\n # If couldn't find a exploration target, stop the app\n stop_app_intent = self.app.get_stop_intent()\n self.logger.info(\"Cannot find an exploration target. 
Trying to restart app...\")\n self.__action_history.append('- stop the app')\n self.__thought_history.append(\"couldn't find a exploration target, stop the app\")\n self.__event_trace += EVENT_FLAG_STOP_APP\n return None, IntentEvent(intent=stop_app_intent)\n \n def _save2yaml(self, file_name, state_prompt, idx, state_str, inputs='null'):\n if not os.path.exists(file_name):\n tmp_data = {\n 'task_name': self.task,\n 'step_num': 0,\n 'records': []\n }\n with open(file_name, 'w', encoding='utf-8') as f:\n yaml.dump(tmp_data, f)\n\n with open(file_name, 'r', encoding='utf-8') as f:\n old_yaml_data = yaml.safe_load(f)\n \n new_records = old_yaml_data['records']\n new_records.append(\n {'State': state_prompt,\n 'Choice': idx,\n 'Input': inputs,\n 'state_str': state_str}\n )\n # import pdb;pdb.set_trace()\n data = {\n 'task_name': self.task,\n 'step_num': len(list(old_yaml_data['records'])),\n 'records': new_records\n }\n with open(file_name, 'w', encoding='utf-8') as f:\n yaml.dump(data, f)\n def _make_prompt_lmql(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False):\n if self.use_memory:\n # if isinstance(state_str, list):\n # if len(state_str) == 1:\n # state_str = state_str[0]\n # else:\n # state_str = self.memory.hash_state(state_prompt)\n # new_state_prompt = self.f(action_history, state_prompt, state_str)\n # if new_state_prompt !z= None and new_state_prompt != 'no_description':\n # state_prompt = new_state_prompt\n if len(action_history) <= len(self.similar_ele_path):\n current_ui_id = len(action_history) - 1\n new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function)\n if new_state_prompt != state_prompt: # current state contains an element of insight\n self.state_ele_memory[state_str] = new_state_prompt\n state_prompt = new_state_prompt\n # elif state_str in self.state_ele_memory.keys():\n # state_prompt = self.state_ele_memory[state_str]\n\n if use_thoughts:\n history_with_thought = []\n for idx in range(len(action_history)):\n history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx])\n else:\n history_with_thought = action_history\n\n\n return '\\n'.join(history_with_thought),state_prompt\n def _make_prompt(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False):\n if self.use_memory:\n # if isinstance(state_str, list):\n # if len(state_str) == 1:\n # state_str = state_str[0]\n # else:\n # state_str = self.memory.hash_state(state_prompt)\n # new_state_prompt = self.f(action_history, state_prompt, state_str)\n # if new_state_prompt !z= None and new_state_prompt != 'no_description':\n # state_prompt = new_state_prompt\n if len(action_history) <= len(self.similar_ele_path):\n current_ui_id = len(action_history) - 1\n new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function)\n if new_state_prompt != state_prompt: # current state contains an element of insight\n self.state_ele_memory[state_str] = new_state_prompt\n state_prompt = new_state_prompt\n # elif state_str in self.state_ele_memory.keys():\n # state_prompt = self.state_ele_memory[state_str]\n\n if use_thoughts:\n history_with_thought = []\n for idx in range(len(action_history)):\n history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx])\n else:\n history_with_thought = action_history\n\n introduction = '''You are a 
smartphone assistant to help users complete tasks by interacting with mobile apps.Given a task, the previous UI actions, and the content of current UI state, your job is to decide whether the task is already finished by the previous actions, and if not, decide which UI element in current UI state should be interacted.'''\n task_prompt = 'Task: ' + self.task\n history_prompt = 'Previous UI actions: \\n' + '\\n'.join(history_with_thought)\n full_state_prompt = 'Current UI state: \\n' + state_prompt\n request_prompt = '''Your answer should always use the following format:1. Completing this task on a smartphone usually involves these steps: <?>.\\n2. Analyses of the relations between the task and the previous UI actions and current UI state: <?>.\\n3. Based on the previous actions, is the task already finished? <Y/N>. The next step should be <?/None>.\\n4. Can the task be proceeded with the current UI state? <Y/N>. Fill in the blanks about the next one interaction: - id=<id number> - action=<tap/input> - input text=<text or N/A>'''\n prompt = introduction + '\\n' + task_prompt + '\\n' + history_prompt + '\\n' + full_state_prompt + '\\n' + request_prompt\n return prompt\n \n def _extract_input_text(self, string, start='Text: ', end=' Thought'):\n start_index = string.find(start) + len(start) # Find the location of 'start'\n if start_index == -1:\n start_index = 0\n end_index = string.find(end) # Find the location of 'end'\n substring = string[start_index:end_index] if end_index != -1 else string[start_index:]\n return substring\n \n def _extract_input_textv2(self, string):\n if string[:11] == 'InputText: ':\n return string[11:]\n else:\n return string\n \n def _get_text_view_description(self, view):\n content_description = safe_dict_get(view, 'content_description', default='')\n view_text = safe_dict_get(view, 'text', default='')\n\n view_desc = f\"<input class='&'>#</input>\"#.replace('&', view_class)#.replace('#', text)\n if view_text:\n view_desc = view_desc.replace('#', view_text)\n else:\n view_desc = view_desc.replace('#', '')\n if content_description:\n view_desc = view_desc.replace('&', content_description)\n else:\n view_desc = view_desc.replace(\" class='&'\", \"\")\n return view_desc\n\n def _get_action_from_views_actions(self, action_history, thought_history, views=None, candidate_actions=None, state_strs=None, current_state=None):\n '''\n get action choice from LLM based on a list of views and corresponding actions\n '''\n if current_state:\n state_prompt, candidate_actions, _, _ = current_state.get_described_actions()\n state_str = current_state.state_str\n if USE_LMQL:\n history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str,\n thought_history=thought_history) \n else:\n prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history)\n else:\n views_with_id = []\n for id in range(len(views)):\n views_with_id.append(tools.insert_id_into_view(views[id], id))\n state_prompt = '\\n'.join(views_with_id)\n state_str = tools.hash_string(state_prompt)\n if USE_LMQL:\n history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str,\n thought_history=thought_history) \n else:\n prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history)\n\n # ids = [str(idx) for idx, i in enumerate(candidate_actions)]\n ids = str([i for i in range(len(candidate_actions))])\n 
\n if USE_LMQL:\n idx, action_type, input_text=prompt_llm_with_history(task=self.task, history=history, ui_desc=state_prompt, ids=ids)\n else:\n print('********************************** prompt: **********************************')\n print(prompt)\n print('********************************** end of prompt **********************************')\n response = tools.query_gpt(prompt)\n \n print(f'response: {response}')\n idx, action_type, input_text = tools.extract_action(response)\n\n file_name = self.device.output_dir +'/'+ self.task.replace('\"', '_').replace(\"'\", '_') + '.yaml' #str(str(time.time()).replace('.', ''))\n idx = int(idx)\n selected_action = candidate_actions[idx]\n \n selected_view_description = tools.get_item_properties_from_id(ui_state_desc=state_prompt, view_id=idx)\n thought = ''# tools.get_thought(response)\n\n if isinstance(selected_action, SetTextEvent):\n if input_text != \"N/A\" and input_text != None:\n selected_action.text = input_text.replace('\"', '').replace(' ', '-')\n if len(selected_action.text) > 30: # heuristically disable long text input\n selected_action.text = ''\n else:\n selected_action.text = ''\n self._save2yaml(file_name, state_prompt, idx, state_strs, inputs=selected_action.text)\n else:\n self._save2yaml(file_name, state_prompt, idx, state_strs, inputs='null')\n return selected_action, candidate_actions, selected_view_description, thought\n\n def _insert_predictions_into_state_prompt(self, state_prompt, current_state_item_descriptions):\n state_prompt_list = state_prompt.split('>\\n')\n item_list = []\n for view_desc in state_prompt_list:\n if view_desc[0] == ' ':\n view_desc = view_desc[1:]\n if view_desc[-1] != '>':\n view_desc = view_desc + '>'\n view_desc_without_id = tools.get_view_without_id(view_desc)\n if view_desc_without_id in current_state_item_descriptions.keys():\n prediction = 'title=' + current_state_item_descriptions[view_desc_without_id]\n view_desc_list = view_desc.split(' ', 2)\n if len(view_desc_list) > 2: # for example, <button id=3 class='More options' checked=False></button>\n inserted_view = view_desc_list[0] + ' ' + view_desc_list[1] + ' ' + prediction + ' ' + view_desc_list[2]\n else: # for example, <p id=4>June</p>\n latter_part = view_desc_list[1].split('>', 1)\n inserted_view = view_desc_list[0] + ' ' + latter_part[0] + ' ' + prediction + '>' + latter_part[1]\n if inserted_view[-1] != '>':\n inserted_view += '>'\n item_list.append(inserted_view)\n else:\n item_list.append(view_desc)\n return '\\n'.join(item_list)\n\n def _get_item_prediction(self, action_history, state_prompt, state_str):\n '''\n find the most match history_state in memory_graph based on action_history. 
\n match the current items in device_state with the history items in history_state, \n return the predicted screen after touching the item\n if can not find the device_state not in action_history, return None, can decide whether to explore\n '''\n def parse_history_views(history):\n parsed_views = []\n for history_action in history:\n history_action_list = history_action.split(': ', 1)\n if 'launchApp' in history_action:\n return []\n latter_part = history_action_list[1]\n if ' InputText:' in latter_part:\n target_view = latter_part.split(' InputText:', 1)[0]\n elif ' Reason:' in latter_part:\n target_view = latter_part.split(' Reason:', 1)[0]\n else:\n target_view = latter_part\n parsed_views.append(target_view)\n return parsed_views\n \n action_history = parse_history_views(action_history[1:]) # ignore the first action, which is launching the app\n \n # search the current state str in memory based on history actions\n current_state_str = self.memory.get_first_state_str()\n next_state_str = None\n for actionid in range(0, len(action_history)):\n actioned_view = action_history[actionid] #action_history[actionid].rsplit('.', 1)[0]\n next_state_str = self.memory.get_successor_by_node_edge(current_state_str, actioned_view)\n current_state_str = next_state_str\n # the past actions have lead to a state that does not exist in the memory\n if next_state_str == None:\n break\n if next_state_str == None:\n current_state_str = state_str\n # now, current_state_str is the current device state string, we should add all its successors' information into the items on this device state\n current_state_item_descriptions = self.memory.get_predictions_of_items(current_state_str)\n # import pdb;pdb.set_trace()\n if current_state_item_descriptions is None:\n return 'no_description' # there is no description of the current state, either it is the leaf node or it was not explored\n # import pdb;pdb.set_trace()\n return self._insert_predictions_into_state_prompt(state_prompt, current_state_item_descriptions)" }, { "identifier": "POLICY_NAIVE_DFS", "path": "droidbot/input_policy.py", "snippet": "POLICY_NAIVE_DFS = \"dfs_naive\"" }, { "identifier": "POLICY_GREEDY_DFS", "path": "droidbot/input_policy.py", "snippet": "POLICY_GREEDY_DFS = \"dfs_greedy\"" }, { "identifier": "POLICY_NAIVE_BFS", "path": "droidbot/input_policy.py", "snippet": "POLICY_NAIVE_BFS = \"bfs_naive\"" }, { "identifier": "POLICY_GREEDY_BFS", "path": "droidbot/input_policy.py", "snippet": "POLICY_GREEDY_BFS = \"bfs_greedy\"" }, { "identifier": "POLICY_REPLAY", "path": "droidbot/input_policy.py", "snippet": "POLICY_REPLAY = \"replay\"" }, { "identifier": "POLICY_MEMORY_GUIDED", "path": "droidbot/input_policy.py", "snippet": "POLICY_MEMORY_GUIDED = \"memory_guided\" # implemented in input_policy2" }, { "identifier": "POLICY_MANUAL", "path": "droidbot/input_policy.py", "snippet": "POLICY_MANUAL = \"manual\"" }, { "identifier": "POLICY_MONKEY", "path": "droidbot/input_policy.py", "snippet": "POLICY_MONKEY = \"monkey\"" }, { "identifier": "POLICY_NONE", "path": "droidbot/input_policy.py", "snippet": "POLICY_NONE = \"none\"" }, { "identifier": "POLICY_TASK", "path": "droidbot/input_policy.py", "snippet": "POLICY_TASK = \"task\"" } ]
import json import logging import subprocess import time from .input_event import EventLog from .input_policy import UtgBasedInputPolicy, UtgNaiveSearchPolicy, UtgGreedySearchPolicy, \ UtgReplayPolicy, \ ManualPolicy, TaskPolicy, \ POLICY_NAIVE_DFS, POLICY_GREEDY_DFS, \ POLICY_NAIVE_BFS, POLICY_GREEDY_BFS, \ POLICY_REPLAY, POLICY_MEMORY_GUIDED, \ POLICY_MANUAL, POLICY_MONKEY, POLICY_NONE, POLICY_TASK from .input_script import DroidBotScript from .input_policy2 import MemoryGuidedPolicy
13,575
DEFAULT_POLICY = POLICY_GREEDY_DFS DEFAULT_EVENT_INTERVAL = 1 DEFAULT_EVENT_COUNT = 100000000 DEFAULT_TIMEOUT = -1 class UnknownInputException(Exception): pass class InputManager(object): """ This class manages all events to send during app running """ def __init__(self, device, app, task, policy_name, random_input, event_count, event_interval, script_path=None, profiling_method=None, master=None, replay_output=None): """ manage input event sent to the target device :param device: instance of Device :param app: instance of App :param policy_name: policy of generating events, string :return: """ self.logger = logging.getLogger('InputEventManager') self.enabled = True self.device = device self.app = app self.task = task self.policy_name = policy_name self.random_input = random_input self.events = [] self.policy = None self.script = None self.event_count = event_count self.event_interval = event_interval self.replay_output = replay_output self.monkey = None if script_path is not None: f = open(script_path, 'r') script_dict = json.load(f) self.script = DroidBotScript(script_dict) self.policy = self.get_input_policy(device, app, master) self.profiling_method = profiling_method def get_input_policy(self, device, app, master): if self.policy_name == POLICY_NONE: input_policy = None elif self.policy_name == POLICY_MONKEY: input_policy = None elif self.policy_name in [POLICY_NAIVE_DFS, POLICY_NAIVE_BFS]: input_policy = UtgNaiveSearchPolicy(device, app, self.random_input, self.policy_name) elif self.policy_name in [POLICY_GREEDY_DFS, POLICY_GREEDY_BFS]: input_policy = UtgGreedySearchPolicy(device, app, self.random_input, self.policy_name) elif self.policy_name == POLICY_MEMORY_GUIDED: input_policy = MemoryGuidedPolicy(device, app, self.random_input) elif self.policy_name == POLICY_REPLAY: input_policy = UtgReplayPolicy(device, app, self.replay_output) elif self.policy_name == POLICY_MANUAL: input_policy = ManualPolicy(device, app)
DEFAULT_POLICY = POLICY_GREEDY_DFS DEFAULT_EVENT_INTERVAL = 1 DEFAULT_EVENT_COUNT = 100000000 DEFAULT_TIMEOUT = -1 class UnknownInputException(Exception): pass class InputManager(object): """ This class manages all events to send during app running """ def __init__(self, device, app, task, policy_name, random_input, event_count, event_interval, script_path=None, profiling_method=None, master=None, replay_output=None): """ manage input event sent to the target device :param device: instance of Device :param app: instance of App :param policy_name: policy of generating events, string :return: """ self.logger = logging.getLogger('InputEventManager') self.enabled = True self.device = device self.app = app self.task = task self.policy_name = policy_name self.random_input = random_input self.events = [] self.policy = None self.script = None self.event_count = event_count self.event_interval = event_interval self.replay_output = replay_output self.monkey = None if script_path is not None: f = open(script_path, 'r') script_dict = json.load(f) self.script = DroidBotScript(script_dict) self.policy = self.get_input_policy(device, app, master) self.profiling_method = profiling_method def get_input_policy(self, device, app, master): if self.policy_name == POLICY_NONE: input_policy = None elif self.policy_name == POLICY_MONKEY: input_policy = None elif self.policy_name in [POLICY_NAIVE_DFS, POLICY_NAIVE_BFS]: input_policy = UtgNaiveSearchPolicy(device, app, self.random_input, self.policy_name) elif self.policy_name in [POLICY_GREEDY_DFS, POLICY_GREEDY_BFS]: input_policy = UtgGreedySearchPolicy(device, app, self.random_input, self.policy_name) elif self.policy_name == POLICY_MEMORY_GUIDED: input_policy = MemoryGuidedPolicy(device, app, self.random_input) elif self.policy_name == POLICY_REPLAY: input_policy = UtgReplayPolicy(device, app, self.replay_output) elif self.policy_name == POLICY_MANUAL: input_policy = ManualPolicy(device, app)
elif self.policy_name == POLICY_TASK:
16
2023-10-23 03:32:58+00:00
16k
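The record above ends here. As an illustration only (the intended consumption of these fields is an assumption inferred from the column names and record layout, not something stated in the dataset itself), a minimal sketch of how a harness might turn such a record into a next-line completion example, in Python:

    # Hypothetical helper: field names (context, import_statement, cropped_code,
    # next_line) follow the dataset's columns; the prompt layout is an assumption.
    from typing import Dict, Tuple

    def build_completion_example(record: Dict) -> Tuple[str, str]:
        # Cross-file snippets retrieved for this example ("identifier"/"path"/"snippet"
        # keys as shown in the context list above).
        context = "\n\n".join(item["snippet"] for item in record["context"])
        # Prepend the file's import block and the in-file code preceding the target line.
        prompt = context + "\n\n" + record["import_statement"] + "\n" + record["cropped_code"]
        # Gold continuation, e.g. "elif self.policy_name == POLICY_TASK:" for this record.
        target = record["next_line"]
        return prompt, target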
f0uriest/interpax
tests/test_interpolate.py
[ { "identifier": "fft_interp1d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=\"n\")\ndef fft_interp1d(f: jax.Array, n: int, sx: jax.Array = None, dx: float = 1.0):\n \"\"\"Interpolation of a 1d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray, shape(nx, ...)\n Source data. Assumed to cover 1 full period, excluding the endpoint.\n n : int\n Number of desired interpolation points.\n sx : ndarray or None\n Shift in x to evaluate at. If original data is f(x), interpolates to f(x + sx)\n dx : float\n Spacing of source points\n\n Returns\n -------\n fi : ndarray, shape(n, ..., len(sx))\n Interpolated (and possibly shifted) data points\n \"\"\"\n c = jnp.fft.ifft(f, axis=0)\n nx = c.shape[0]\n if sx is not None:\n sx = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(nx)[:, None] * sx / dx)\n c = (c[None].T * sx).T\n c = jnp.moveaxis(c, 0, -1)\n pad = ((n - nx) // 2, n - nx - (n - nx) // 2)\n if nx % 2 != 0:\n pad = pad[::-1]\n c = jnp.fft.ifftshift(_pad_along_axis(jnp.fft.fftshift(c, axes=0), pad, axis=0))\n return jnp.fft.fft(c, axis=0).real" }, { "identifier": "fft_interp2d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=(\"n1\", \"n2\"))\ndef fft_interp2d(\n f: jax.Array,\n n1: int,\n n2: int,\n sx: jax.Array = None,\n sy: jax.Array = None,\n dx: float = 1.0,\n dy: float = 1.0,\n):\n \"\"\"Interpolation of a 2d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray, shape(nx, ny, ...)\n Source data. Assumed to cover 1 full period, excluding the endpoint.\n n1, n2 : int\n Number of desired interpolation points in x and y directions\n sx, sy : ndarray or None\n Shift in x and y to evaluate at. If original data is f(x,y), interpolates to\n f(x + sx, y + sy). Both must be provided or None\n dx, dy : float\n Spacing of source points in x and y\n\n Returns\n -------\n fi : ndarray, shape(n1, n2, ..., len(sx))\n Interpolated (and possibly shifted) data points\n \"\"\"\n c = jnp.fft.ifft2(f, axes=(0, 1))\n nx, ny = c.shape[:2]\n if (sx is not None) and (sy is not None):\n sx = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(nx)[:, None] * sx / dx)\n sy = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(ny)[:, None] * sy / dy)\n c = (c[None].T * sx[None, :, :] * sy[:, None, :]).T\n c = jnp.moveaxis(c, 0, -1)\n padx = ((n1 - nx) // 2, n1 - nx - (n1 - nx) // 2)\n pady = ((n2 - ny) // 2, n2 - ny - (n2 - ny) // 2)\n if nx % 2 != 0:\n padx = padx[::-1]\n if ny % 2 != 0:\n pady = pady[::-1]\n\n c = jnp.fft.ifftshift(\n _pad_along_axis(jnp.fft.fftshift(c, axes=0), padx, axis=0), axes=0\n )\n c = jnp.fft.ifftshift(\n _pad_along_axis(jnp.fft.fftshift(c, axes=1), pady, axis=1), axes=1\n )\n\n return jnp.fft.fft2(c, axes=(0, 1)).real" }, { "identifier": "Interpolator1D", "path": "interpax/_spline.py", "snippet": "class Interpolator1D(eqx.Module):\n \"\"\"Convenience class for representing a 1D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. 
If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n - ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the\n data, and will not introduce new extrema in the interpolated points\n - ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at\n both endpoints\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as a 2 element array or tuple to specify different conditions\n for xq<x[0] and x[-1]<xq\n period : float > 0, None\n periodicity of the function. If given, function is assumed to be periodic\n on the interval [0,period]. None denotes no periodicity\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float] = None,\n **kwargs,\n ):\n x, f = map(jnp.asarray, (x, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n\n errorif(\n (len(x) != f.shape[axis]) or (jnp.ndim(x) != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_1D, ValueError, f\"unknown method {method}\")\n\n self.x = x\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, axis, **kwargs)\n\n self.derivs = {\"fx\": fx}\n\n def __call__(self, xq: jax.Array, dx: int = 0):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n Query points where interpolation is desired\n dx : int >= 0\n Derivative to take.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp1d(\n xq,\n self.x,\n self.f,\n self.method,\n dx,\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "Interpolator2D", "path": "interpax/_spline.py", "snippet": "class Interpolator2D(eqx.Module):\n \"\"\"Convenience class for representing a 2D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y directions. 
None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. Use a\n single value for the same in both directions.\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n y: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float, tuple]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n y: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n ):\n x, y, f = map(jnp.asarray, (x, y, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fxy = kwargs.pop(\"fxy\", None)\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_2D, ValueError, f\"unknown method {method}\")\n\n self.x = x\n self.y = y\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n\n self.derivs = {\"fx\": fx, \"fy\": fy, \"fxy\": fxy}\n\n def __call__(self, xq: jax.Array, yq: jax.Array, dx: int = 0, dy: int = 0):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq, yq : ndarray, shape(Nq,)\n x, y query points where interpolation is desired\n dx, dy : int >= 0\n Derivative to take in x, y directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp2d(\n xq,\n yq,\n self.x,\n self.y,\n self.f,\n self.method,\n (dx, dy),\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "Interpolator3D", "path": "interpax/_spline.py", "snippet": "class Interpolator3D(eqx.Module):\n \"\"\"Convenience class for representing a 3D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n z : ndarray, shape(Nz,)\n z coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,Nz,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y, z directions. 
None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. Use a\n single value for the same in both directions.\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n y: jax.Array\n z: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float, tuple]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n y: jax.Array,\n z: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n ):\n x, y, z, f = map(jnp.asarray, (x, y, z, f))\n axis = kwargs.get(\"axis\", 0)\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(\n (len(z) != f.shape[2]) or (z.ndim != 1),\n ValueError,\n \"z and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_3D, ValueError, f\"unknown method {method}\")\n\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fz = kwargs.pop(\"fz\", None)\n fxy = kwargs.pop(\"fxy\", None)\n fxz = kwargs.pop(\"fxz\", None)\n fyz = kwargs.pop(\"fyz\", None)\n fxyz = kwargs.pop(\"fxyz\", None)\n\n self.x = x\n self.y = y\n self.z = z\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fz is None:\n fz = approx_df(z, f, method, 2, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n if fxz is None:\n fxz = approx_df(z, fx, method, 2, **kwargs)\n if fyz is None:\n fyz = approx_df(z, fy, method, 2, **kwargs)\n if fxyz is None:\n fxyz = approx_df(z, fxy, method, 2, **kwargs)\n\n self.derivs = {\n \"fx\": fx,\n \"fy\": fy,\n \"fz\": fz,\n \"fxy\": fxy,\n \"fxz\": fxz,\n \"fyz\": fyz,\n \"fxyz\": fxyz,\n }\n\n def __call__(\n self,\n xq: jax.Array,\n yq: jax.Array,\n zq: jax.Array,\n dx: int = 0,\n dy: int = 0,\n dz: int = 0,\n ):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq, yq, zq : ndarray, shape(Nq,)\n x, y, z query points where interpolation is desired\n dx, dy, dz : int >= 0\n Derivative to take in x, y, z directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp3d(\n xq,\n yq,\n zq,\n self.x,\n self.y,\n self.z,\n self.f,\n self.method,\n (dx, dy, dz),\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "interp1d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp1d(\n xq: jax.Array,\n x: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 1d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n query points where interpolation is desired\n x : ndarray, shape(Nx,)\n coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: 
linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n - ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the\n data, and will not introduce new extrema in the interpolated points\n - ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at\n both endpoints\n\n derivative : int >= 0\n derivative order to calculate\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as a 2 element array or tuple to specify different conditions\n for xq<x[0] and x[-1]<xq\n period : float > 0, None\n periodicity of the function. If given, function is assumed to be periodic\n on the interval [0,period]. None denotes no periodicity\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, f data, recommend using Interpolator1D\n which caches the calculation of the derivatives and spline coefficients.\n\n \"\"\"\n xq, x, f = map(jnp.asarray, (xq, x, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n outshape = xq.shape + f.shape[1:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq = jnp.atleast_1d(xq)\n\n errorif(\n (len(x) != f.shape[axis]) or (jnp.ndim(x) != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_1D, ValueError, f\"unknown method {method}\")\n\n lowx, highx = _parse_extrap(extrap, 1)\n\n if period is not None:\n xq, x, f, fx = _make_periodic(xq, x, period, axis, f, fx)\n lowx = highx = True\n\n if method == \"nearest\":\n\n def derivative0():\n i = jnp.argmin(jnp.abs(xq[:, np.newaxis] - x[np.newaxis]), axis=1)\n return f[i]\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[1:]))\n\n fq = jax.lax.switch(derivative, [derivative0, derivative1])\n\n elif method == \"linear\":\n\n def derivative0():\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n df = jnp.take(f, i, axis) - jnp.take(f, i - 1, axis)\n dx = x[i] - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n delta = xq - x[i - 1]\n fq = jnp.where(\n (dx == 0),\n jnp.take(f, i, axis).T,\n jnp.take(f, i - 1, axis).T + (delta * dxi * df.T),\n ).T\n return fq\n\n def derivative1():\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n df = jnp.take(f, i, axis) - jnp.take(f, i - 1, axis)\n dx = x[i] - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n return (df.T * dxi).T\n\n def derivative2():\n return jnp.zeros((xq.size, *f.shape[1:]))\n\n fq = jax.lax.switch(derivative, [derivative0, derivative1, derivative2])\n\n elif method in (CUBIC_METHODS + (\"monotonic\", \"monotonic-0\")):\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n if fx is None:\n fx = approx_df(x, f, method, axis, **kwargs)\n assert fx.shape == f.shape\n\n dx = x[i] - x[i - 1]\n delta = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n t = delta * dxi\n\n f0 = jnp.take(f, i - 1, axis)\n f1 = jnp.take(f, i, axis)\n fx0 = (jnp.take(fx, i - 1, axis).T * dx).T\n fx1 = (jnp.take(fx, i, 
axis).T * dx).T\n\n F = jnp.stack([f0, f1, fx0, fx1], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_CUBIC, F).T\n ttx = _get_t_der(t, derivative, dxi)\n fq = jnp.einsum(\"ji...,ij->i...\", coef, ttx)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n return fq.reshape(outshape)" }, { "identifier": "interp2d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp2d( # noqa: C901 - FIXME: break this up into simpler pieces\n xq: jax.Array,\n yq: jax.Array,\n x: jax.Array,\n y: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 2d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n x query points where interpolation is desired\n yq : ndarray, shape(Nq,)\n y query points where interpolation is desired\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n derivative : int >= 0 or array-like, shape(2,)\n derivative order to calculate in x, y. Use a single value for the same in both\n directions.\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y directions. None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. 
Use a\n single value for the same in both directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, y, f data, recommend using\n Interpolator2D which caches the calculation of the derivatives and spline\n coefficients.\n\n \"\"\"\n xq, yq, x, y, f = map(jnp.asarray, (xq, yq, x, y, f))\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fxy = kwargs.pop(\"fxy\", None)\n xq, yq = jnp.broadcast_arrays(xq, yq)\n outshape = xq.shape + f.shape[2:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq, yq = map(jnp.atleast_1d, (xq, yq))\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_2D, ValueError, f\"unknown method {method}\")\n\n periodx, periody = _parse_ndarg(period, 2)\n derivative_x, derivative_y = _parse_ndarg(derivative, 2)\n lowx, highx, lowy, highy = _parse_extrap(extrap, 2)\n\n if periodx is not None:\n xq, x, f, fx, fy, fxy = _make_periodic(xq, x, periodx, 0, f, fx, fy, fxy)\n lowx = highx = True\n if periody is not None:\n yq, y, f, fx, fy, fxy = _make_periodic(yq, y, periody, 1, f, fx, fy, fxy)\n lowy = highy = True\n\n if method == \"nearest\":\n\n def derivative0():\n # because of the regular spaced grid we know that the nearest point\n # will be one of the 4 neighbors on the grid, so we first find those\n # and then take the nearest one among them.\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n neighbors_x = jnp.array(\n [[x[i], x[i - 1], x[i], x[i - 1]], [y[j], y[j], y[j - 1], y[j - 1]]]\n )\n neighbors_f = jnp.array(\n [f[i, j].T, f[i - 1, j].T, f[i, j - 1].T, f[i - 1, j - 1].T]\n )\n xyq = jnp.array([xq, yq])\n dist = jnp.linalg.norm(neighbors_x - xyq[:, None, :], axis=0)\n idx = jnp.argmin(dist, axis=0)\n return jax.vmap(lambda a, b: jnp.take(a, b, axis=-1))(neighbors_f.T, idx)\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[2:]))\n\n fq = jax.lax.cond(\n (derivative_x == 0) & (derivative_y == 0), derivative0, derivative1\n )\n\n elif method == \"linear\":\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n\n f00 = f[i - 1, j - 1]\n f01 = f[i - 1, j]\n f10 = f[i, j - 1]\n f11 = f[i, j]\n x0 = x[i - 1]\n x1 = x[i]\n y0 = y[j - 1]\n y1 = y[j]\n dx = x1 - x0\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n dy = y1 - y0\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n\n dx0 = lambda: jnp.array([x1 - xq, xq - x0])\n dx1 = lambda: jnp.array([-jnp.ones_like(xq), jnp.ones_like(xq)])\n dx2 = lambda: jnp.zeros((2, xq.size))\n dy0 = lambda: jnp.array([y1 - yq, yq - y0])\n dy1 = lambda: jnp.array([-jnp.ones_like(yq), jnp.ones_like(yq)])\n dy2 = lambda: jnp.zeros((2, yq.size))\n\n tx = jax.lax.switch(derivative_x, [dx0, dx1, dx2])\n ty = jax.lax.switch(derivative_y, [dy0, dy1, dy2])\n F = jnp.array([[f00, f01], [f10, f11]])\n fq = (dxi * dyi * jnp.einsum(\"ijk...,ik,jk->k...\", F, tx, ty).T).T\n\n elif method in CUBIC_METHODS:\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fxy is None:\n fxy = 
approx_df(y, fx, method, 1, **kwargs)\n assert fx.shape == fy.shape == fxy.shape == f.shape\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n\n dx = x[i] - x[i - 1]\n deltax = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n tx = deltax * dxi\n dy = y[j] - y[j - 1]\n deltay = yq - y[j - 1]\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n ty = deltay * dyi\n\n fs = OrderedDict()\n fs[\"f\"] = f\n fs[\"fx\"] = fx\n fs[\"fy\"] = fy\n fs[\"fxy\"] = fxy\n fsq = OrderedDict()\n for ff in fs.keys():\n for jj in [0, 1]:\n for ii in [0, 1]:\n s = ff + str(ii) + str(jj)\n fsq[s] = fs[ff][i - 1 + ii, j - 1 + jj]\n if \"x\" in ff:\n fsq[s] = (dx * fsq[s].T).T\n if \"y\" in ff:\n fsq[s] = (dy * fsq[s].T).T\n\n F = jnp.stack([foo for foo in fsq.values()], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_BICUBIC, F).T\n coef = jnp.moveaxis(coef.reshape((4, 4, *coef.shape[1:]), order=\"F\"), 2, 0)\n ttx = _get_t_der(tx, derivative_x, dxi)\n tty = _get_t_der(ty, derivative_y, dyi)\n fq = jnp.einsum(\"ijk...,ij,ik->i...\", coef, ttx, tty)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n fq = _extrap(yq, fq, y, lowy, highy)\n\n return fq.reshape(outshape)" }, { "identifier": "interp3d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp3d( # noqa: C901 - FIXME: break this up into simpler pieces\n xq: jax.Array,\n yq: jax.Array,\n zq: jax.Array,\n x: jax.Array,\n y: jax.Array,\n z: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 3d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n x query points where interpolation is desired\n yq : ndarray, shape(Nq,)\n y query points where interpolation is desired\n zq : ndarray, shape(Nq,)\n z query points where interpolation is desired\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n z : ndarray, shape(Nz,)\n z coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,Nz,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n derivative : int >= 0, array-like, shape(3,)\n derivative order to calculate in x,y,z directions. Use a single value for the\n same in all directions.\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions for\n [[xlow, xhigh],[ylow,yhigh],[zlow,zhigh]]\n period : float > 0, None, array-like, shape(3,)\n periodicity of the function in x, y, z directions. None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. 
Use a\n single value for the same in all directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, y, z, f data, recommend using\n Interpolator3D which caches the calculation of the derivatives and spline\n coefficients.\n\n \"\"\"\n xq, yq, zq, x, y, z, f = map(jnp.asarray, (xq, yq, zq, x, y, z, f))\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(\n (len(z) != f.shape[2]) or (z.ndim != 1),\n ValueError,\n \"z and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_3D, ValueError, f\"unknown method {method}\")\n\n xq, yq, zq = jnp.broadcast_arrays(xq, yq, zq)\n outshape = xq.shape + f.shape[3:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq, yq, zq = map(jnp.atleast_1d, (xq, yq, zq))\n\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fz = kwargs.pop(\"fz\", None)\n fxy = kwargs.pop(\"fxy\", None)\n fxz = kwargs.pop(\"fxz\", None)\n fyz = kwargs.pop(\"fyz\", None)\n fxyz = kwargs.pop(\"fxyz\", None)\n\n periodx, periody, periodz = _parse_ndarg(period, 3)\n derivative_x, derivative_y, derivative_z = _parse_ndarg(derivative, 3)\n lowx, highx, lowy, highy, lowz, highz = _parse_extrap(extrap, 3)\n\n if periodx is not None:\n xq, x, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n xq, x, periodx, 0, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowx = highx = True\n if periody is not None:\n yq, y, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n yq, y, periody, 1, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowy = highy = True\n if periodz is not None:\n zq, z, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n zq, z, periodz, 2, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowz = highz = True\n\n if method == \"nearest\":\n\n def derivative0():\n # because of the regular spaced grid we know that the nearest point\n # will be one of the 8 neighbors on the grid, so we first find those\n # and then take the nearest one among them.\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k = jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n neighbors_x = jnp.array(\n [\n [x[i], x[i - 1], x[i], x[i - 1], x[i], x[i - 1], x[i], x[i - 1]],\n [y[j], y[j], y[j - 1], y[j - 1], y[j], y[j], y[j - 1], y[j - 1]],\n [z[k], z[k], z[k], z[k], z[k - 1], z[k - 1], z[k - 1], z[k - 1]],\n ]\n )\n neighbors_f = jnp.array(\n [\n f[i, j, k].T,\n f[i - 1, j, k].T,\n f[i, j - 1, k].T,\n f[i - 1, j - 1, k].T,\n f[i, j, k - 1].T,\n f[i - 1, j, k - 1].T,\n f[i, j - 1, k - 1].T,\n f[i - 1, j - 1, k - 1].T,\n ]\n )\n xyzq = jnp.array([xq, yq, zq])\n dist = jnp.linalg.norm(neighbors_x - xyzq[:, None, :], axis=0)\n idx = jnp.argmin(dist, axis=0)\n return jax.vmap(lambda a, b: jnp.take(a, b, axis=-1))(neighbors_f.T, idx)\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[3:]))\n\n fq = jax.lax.cond(\n (derivative_x == 0) & (derivative_y == 0) & (derivative_z == 0),\n derivative0,\n derivative1,\n )\n\n elif method == \"linear\":\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k 
= jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n\n f000 = f[i - 1, j - 1, k - 1]\n f001 = f[i - 1, j - 1, k]\n f010 = f[i - 1, j, k - 1]\n f100 = f[i, j - 1, k - 1]\n f110 = f[i, j, k - 1]\n f011 = f[i - 1, j, k]\n f101 = f[i, j - 1, k]\n f111 = f[i, j, k]\n x0 = x[i - 1]\n x1 = x[i]\n y0 = y[j - 1]\n y1 = y[j]\n z0 = z[k - 1]\n z1 = z[k]\n dx = x1 - x0\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n dy = y1 - y0\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n dz = z1 - z0\n dzi = jnp.where(dz == 0, 0, 1 / dz)\n\n dx0 = lambda: jnp.array([x1 - xq, xq - x0])\n dx1 = lambda: jnp.array([-jnp.ones_like(xq), jnp.ones_like(xq)])\n dx2 = lambda: jnp.zeros((2, xq.size))\n dy0 = lambda: jnp.array([y1 - yq, yq - y0])\n dy1 = lambda: jnp.array([-jnp.ones_like(yq), jnp.ones_like(yq)])\n dy2 = lambda: jnp.zeros((2, yq.size))\n dz0 = lambda: jnp.array([z1 - zq, zq - z0])\n dz1 = lambda: jnp.array([-jnp.ones_like(zq), jnp.ones_like(zq)])\n dz2 = lambda: jnp.zeros((2, zq.size))\n\n tx = jax.lax.switch(derivative_x, [dx0, dx1, dx2])\n ty = jax.lax.switch(derivative_y, [dy0, dy1, dy2])\n tz = jax.lax.switch(derivative_z, [dz0, dz1, dz2])\n\n F = jnp.array([[[f000, f001], [f010, f011]], [[f100, f101], [f110, f111]]])\n fq = (dxi * dyi * dzi * jnp.einsum(\"lijk...,lk,ik,jk->k...\", F, tx, ty, tz).T).T\n\n elif method in CUBIC_METHODS:\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fz is None:\n fz = approx_df(z, f, method, 2, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n if fxz is None:\n fxz = approx_df(z, fx, method, 2, **kwargs)\n if fyz is None:\n fyz = approx_df(z, fy, method, 2, **kwargs)\n if fxyz is None:\n fxyz = approx_df(z, fxy, method, 2, **kwargs)\n assert (\n fx.shape\n == fy.shape\n == fz.shape\n == fxy.shape\n == fxz.shape\n == fyz.shape\n == fxyz.shape\n == f.shape\n )\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k = jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n\n dx = x[i] - x[i - 1]\n deltax = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n tx = deltax * dxi\n\n dy = y[j] - y[j - 1]\n deltay = yq - y[j - 1]\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n ty = deltay * dyi\n\n dz = z[k] - z[k - 1]\n deltaz = zq - z[k - 1]\n dzi = jnp.where(dz == 0, 0, 1 / dz)\n tz = deltaz * dzi\n\n fs = OrderedDict()\n fs[\"f\"] = f\n fs[\"fx\"] = fx\n fs[\"fy\"] = fy\n fs[\"fz\"] = fz\n fs[\"fxy\"] = fxy\n fs[\"fxz\"] = fxz\n fs[\"fyz\"] = fyz\n fs[\"fxyz\"] = fxyz\n fsq = OrderedDict()\n for ff in fs.keys():\n for kk in [0, 1]:\n for jj in [0, 1]:\n for ii in [0, 1]:\n s = ff + str(ii) + str(jj) + str(kk)\n fsq[s] = fs[ff][i - 1 + ii, j - 1 + jj, k - 1 + kk]\n if \"x\" in ff:\n fsq[s] = (dx * fsq[s].T).T\n if \"y\" in ff:\n fsq[s] = (dy * fsq[s].T).T\n if \"z\" in ff:\n fsq[s] = (dz * fsq[s].T).T\n\n F = jnp.stack([foo for foo in fsq.values()], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_TRICUBIC, F).T\n coef = jnp.moveaxis(coef.reshape((4, 4, 4, *coef.shape[1:]), order=\"F\"), 3, 0)\n ttx = _get_t_der(tx, derivative_x, dxi)\n tty = _get_t_der(ty, derivative_y, dyi)\n ttz = _get_t_der(tz, derivative_z, dzi)\n fq = jnp.einsum(\"lijk...,li,lj,lk->l...\", coef, ttx, tty, ttz)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n fq = _extrap(yq, fq, y, lowy, highy)\n fq = _extrap(zq, fq, z, lowz, highz)\n\n return fq.reshape(outshape)" } ]
import jax import jax.numpy as jnp import numpy as np import pytest from jax import config as jax_config from interpax import ( Interpolator1D, Interpolator2D, Interpolator3D, fft_interp1d, fft_interp2d, interp1d, interp2d, interp3d, )
13,196
"""Tests for interpolation functions.""" jax_config.update("jax_enable_x64", True) class TestInterp1D: """Tests for interp1d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x", [ np.linspace(0, 2 * np.pi, 10000), 0.0, ], ) def test_interp1d(self, x): """Test accuracy of different 1d interpolation methods.""" xp = np.linspace(0, 2 * np.pi, 100) f = lambda x: np.sin(x) fp = f(xp) interp1 = lambda xq, *args, **kwargs: interp1d(xq, *args, **kwargs)
"""Tests for interpolation functions.""" jax_config.update("jax_enable_x64", True) class TestInterp1D: """Tests for interp1d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x", [ np.linspace(0, 2 * np.pi, 10000), 0.0, ], ) def test_interp1d(self, x): """Test accuracy of different 1d interpolation methods.""" xp = np.linspace(0, 2 * np.pi, 100) f = lambda x: np.sin(x) fp = f(xp) interp1 = lambda xq, *args, **kwargs: interp1d(xq, *args, **kwargs)
interp2 = lambda xq, *args, **kwargs: Interpolator1D(*args, **kwargs)(xq)
2
2023-10-18 13:12:20+00:00
16k
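The interp1d and Interpolator1D signatures quoted in this record's context, together with the test setup shown in its cropped_code, are enough to reconstruct a small usage example. A minimal sketch, assuming interpax and its dependencies are installed (names taken from the snippets above, data values chosen for illustration):

    # Minimal sketch based on the interp1d / Interpolator1D docstrings quoted above.
    import numpy as np
    from interpax import interp1d, Interpolator1D

    xp = np.linspace(0, 2 * np.pi, 100)    # knots
    fp = np.sin(xp)                        # known function values at the knots
    xq = np.linspace(0, 2 * np.pi, 1000)   # query points

    # One-shot functional form.
    fq = interp1d(xq, xp, fp, method="cubic")

    # Cached form, recommended by the docstring for repeated queries on the same data.
    interp = Interpolator1D(xp, fp, method="cubic")
    fq2 = interp(xq)

Both calls should return arrays of shape (1000,); the Interpolator1D form precomputes the spline derivatives once, which is the distinction the record's own test code exercises.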
apple/ml-nvas3d
demo/generate_demo_video.py
[ { "identifier": "convolve_moving_receiver", "path": "nvas3d/utils/dynamic_utils.py", "snippet": "def convolve_moving_receiver(\n source_audio: np.ndarray,\n rirs: np.ndarray,\n interp_index: T.List[int],\n interp_weight: T.List[float]\n) -> np.ndarray:\n \"\"\"\n Apply convolution between an audio signal and moving impulse responses (IRs).\n\n Args:\n - source_audio: Source audio of shape (audio_len,)\n - rirs: RIRs of shape (num_positions, num_channels, ir_length)\n - interp_index: Indices representing the start positions for interpolation of shape (audio_len,).\n - interp_weight: Weight values for linear interpolation of shape (audio_len,).\n\n Returns:\n - Convolved audio signal of shape (num_channels, audio_len)\n \"\"\"\n\n num_channels = rirs.shape[1]\n audio_len = source_audio.shape[0]\n\n # Perform convolution for each position and channel\n convolved_audios = oaconvolve(source_audio[None, None, :], rirs, axes=-1)[..., :audio_len]\n\n # NumPy fancy indexing and broadcasting for interpolation\n start_audio = convolved_audios[interp_index, np.arange(num_channels)[:, None], np.arange(audio_len)]\n end_audio = convolved_audios[interp_index + 1, np.arange(num_channels)[:, None], np.arange(audio_len)]\n interp_weight = interp_weight[None, :]\n\n # Apply linear interpolation\n moving_audio = (1 - interp_weight) * start_audio + interp_weight * end_audio\n\n return moving_audio" }, { "identifier": "setup_dynamic_interp", "path": "nvas3d/utils/dynamic_utils.py", "snippet": "def setup_dynamic_interp(\n receiver_position: np.ndarray,\n total_samples: int,\n) -> T.Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Setup moving path with a constant speed for a receiver, given its positions in 3D space.\n\n Args:\n - receiver_position: Receiver positions in 3D space of shape (num_positions, 3).\n - total_samples: Total number of samples in the audio.\n\n Returns:\n - interp_index: Indices representing the start positions for interpolation.\n - interp_weight: Weight values for linear interpolation.\n \"\"\"\n\n # Calculate the number of samples per interval\n distance = np.linalg.norm(np.diff(receiver_position, axis=0), axis=1)\n speed_per_sample = distance.sum() / total_samples\n samples_per_interval = np.round(distance / speed_per_sample).astype(int)\n\n # Distribute rounding errors\n error = total_samples - samples_per_interval.sum()\n for i in np.random.choice(len(samples_per_interval), abs(error)):\n samples_per_interval[i] += np.sign(error)\n\n # Calculate indices and weights for linear interpolation\n interp_index = np.repeat(np.arange(len(distance)), samples_per_interval)\n interp_weight = np.concatenate([np.linspace(0, 1, num, endpoint=False) for num in samples_per_interval])\n\n return interp_index, interp_weight.astype(np.float32)" }, { "identifier": "clip_two", "path": "nvas3d/utils/audio_utils.py", "snippet": "def clip_two(audio1, audio2):\n \"\"\"\n Clips two audio signals to the same length.\n\n Args:\n audio1: First audio signal.\n audio2: Second audio signal.\n\n Returns: \n - Two audio signals of the same length.\n \"\"\"\n\n length_diff = audio1.shape[-1] - audio2.shape[-1]\n\n if length_diff == 0:\n return audio1, audio2\n elif length_diff > 0:\n audio1 = audio1[..., :audio2.shape[-1]]\n elif length_diff < 0:\n audio2 = audio2[..., :audio1.shape[-1]]\n\n return audio1, audio2" }, { "identifier": "clip_all", "path": "nvas3d/utils/audio_utils.py", "snippet": "def clip_all(audio_list):\n \"\"\"\n Clips all audio signals in a list to the same length.\n\n Args: \n audio_list: List of audio 
signals.\n\n Returns: \n - List of audio signals of the same length.\n \"\"\"\n\n min_length = min(audio.shape[-1] for audio in audio_list)\n clipped_audio_list = []\n for audio in audio_list:\n clipped_audio = audio[..., :min_length]\n clipped_audio_list.append(clipped_audio)\n\n return clipped_audio_list" }, { "identifier": "create_scene", "path": "soundspaces_nvas3d/utils/ss_utils.py", "snippet": "def create_scene(room: str,\n receiver_position: T.Tuple[float, float, float] = [0.0, 0.0, 0.0],\n sample_rate: float = 48000,\n image_size: T.Tuple[int, int] = (512, 256),\n include_visual_sensor: bool = True,\n hfov: float = 90.0\n ) -> Scene:\n \"\"\"\n Create a soundspaces scene to render IR.\n \"\"\"\n\n # Note: Make sure mp3d room is downloaded\n with suppress_stdout_and_stderr():\n # Create a receiver\n receiver = Receiver(\n position=receiver_position,\n rotation=0,\n sample_rate=sample_rate\n )\n\n scene = Scene(\n room,\n [None], # placeholder for source class\n receiver=receiver,\n include_visual_sensor=include_visual_sensor,\n add_source_mesh=False,\n device=torch.device('cpu'),\n add_source=False,\n image_size=image_size,\n hfov=hfov\n )\n\n return scene" }, { "identifier": "render_rir_parallel", "path": "soundspaces_nvas3d/utils/ss_utils.py", "snippet": "def render_rir_parallel(room_list: T.List[str],\n source_position_list: T.List[T.Tuple[float, float, float]],\n receiver_position_list: T.List[T.Tuple[float, float, float]],\n filename_list: T.List[str] = None,\n receiver_rotation_list: T.List[float] = None,\n batch_size: int = 64,\n sample_rate: float = 48000,\n use_default_material: bool = False,\n channel_type: str = 'Ambisonics',\n channel_order: int = 1\n ) -> T.List[torch.Tensor]:\n \"\"\"\n Run render_ir parallely for all elements of zip(source_position_list, receiver_position_list).\n \"\"\"\n\n assert len(room_list) == len(source_position_list)\n assert len(source_position_list) == len(receiver_position_list)\n\n if filename_list is None:\n is_return = True\n else:\n is_return = False\n\n if receiver_rotation_list is None:\n receiver_rotation_list = [0] * len(receiver_position_list)\n\n # Note: Make sure all rooms are downloaded\n\n # Calculate the number of batches\n num_points = len(source_position_list)\n num_batches = (num_points + batch_size - 1) // batch_size\n\n # Use tqdm to display the progress bar\n progress_bar = tqdm(total=num_points)\n\n def update_progress(*_):\n progress_bar.update()\n\n ir_list = []\n # Process the tasks in batches\n for batch_idx in range(num_batches):\n # Calculate the start and end indices of the current batch\n start_idx = batch_idx * batch_size\n end_idx = min(start_idx + batch_size, num_points)\n if is_return:\n batch = [(room_list[i], source_position_list[i], receiver_position_list[i], None, receiver_rotation_list[i]) for i in range(start_idx, end_idx)]\n else:\n batch = [(room_list[i], source_position_list[i], receiver_position_list[i], filename_list[i], receiver_rotation_list[i]) for i in range(start_idx, end_idx)]\n\n # Create a multiprocessing Pool for the current batch\n with multiprocessing.Pool() as pool:\n tasks = []\n for room, source_position, receiver_position, filename, receiver_rotation in batch:\n # Apply async mapping of process_ir function\n task = pool.apply_async(render_ir, args=(room, source_position, receiver_position, filename, receiver_rotation, sample_rate, use_default_material, channel_type, channel_order), callback=update_progress)\n tasks.append(task)\n\n # Wait for all tasks in the batch to complete and 
collect results\n for task in tasks:\n if is_return:\n ir = task.get() # Block until the result is ready\n ir_list.append(ir) # Append the result to the list\n else:\n task.get()\n if is_return:\n return ir_list" }, { "identifier": "load_room_grid", "path": "soundspaces_nvas3d/utils/aihabitat_utils.py", "snippet": "def load_room_grid(\n room: str,\n grid_distance: float\n) -> T.Dict:\n \"\"\"\n Load grid data for a specified room. If the grid data does not exist, it generates one.\n\n Args:\n - room: Name of the room.\n - grid_distance: The spacing between grid points.\n\n Returns:\n - A dictionary containing grid information for the specified room.\n \"\"\"\n\n grid_distance_str = str(grid_distance).replace(\".\", \"_\")\n dirname_grid = f'data/scene_datasets/metadata/mp3d/grid_{grid_distance_str}'\n filename_grid = f'{dirname_grid}/grid_{room}.npy'\n if not os.path.exists(filename_grid):\n os.makedirs(dirname_grid, exist_ok=True)\n print(f'Computing grid_{room}...')\n from soundspaces_nvas3d.rir_generation.generate_grid import save_xy_grid_points\n grid_info = save_xy_grid_points(room, grid_distance, dirname_grid)\n\n # load grid\n grid_info = np.load(filename_grid, allow_pickle=True).item()\n\n return grid_info" }, { "identifier": "Receiver", "path": "soundspaces_nvas3d/soundspaces_nvas3d.py", "snippet": "class Receiver:\n \"\"\"\n Receiver for SoundSpaces\n \"\"\"\n\n def __init__(self,\n position: T.Tuple[float, float, float],\n rotation: float,\n sample_rate: float = 48000,\n ):\n\n self.position = position\n self.rotation = rotation\n self.sample_rate = sample_rate" }, { "identifier": "Source", "path": "soundspaces_nvas3d/soundspaces_nvas3d.py", "snippet": "class Source:\n \"\"\"\n Source for Soundspaces\n \"\"\"\n\n def __init__(self,\n position: T.Tuple[float, float, float],\n rotation: float,\n dry_sound: str,\n mesh: str,\n device: torch.device\n ):\n\n self.position = position\n self.rotation = rotation\n self.device = device # where to store dry_sound\n self.dry_sound = dry_sound\n self.mesh = mesh" }, { "identifier": "Scene", "path": "soundspaces_nvas3d/soundspaces_nvas3d.py", "snippet": "class Scene:\n \"\"\"\n Soundspaces scene including room, receiver, and source list\n \"\"\"\n\n def __init__(self,\n room: str,\n source_name_list: T.List[str],\n receiver: Receiver = None,\n source_list: T.List[Source] = None,\n include_visual_sensor: bool = True,\n add_source_mesh: bool = True,\n device: torch.device = torch.device('cpu'),\n add_source: bool = True,\n image_size: T.Tuple[int, int] = (512, 256),\n hfov: float = 90.0,\n use_default_material: bool = False,\n channel_type: str = 'Ambisonics',\n channel_order: int = 1\n ):\n\n # Set scene\n self.room = room\n self.n_sources = len(source_name_list)\n assert self.n_sources > 0\n self.receiver = receiver\n self.source_list = source_list\n self.source_current = None\n self.include_visual_sensor = include_visual_sensor\n self.add_source_mesh = add_source_mesh\n self.device = device # where to store IR\n\n # Set channel config for soundspaces\n self.channel = {}\n self.channel['type'] = channel_type\n self.channel['order'] = channel_order\n if channel_type == 'Ambisonics':\n self.channel_count = (self.channel['order'] + 1)**2\n elif channel_type == 'Binaural':\n self.channel_count = 2\n\n # Set aihabitat config for soundspaces\n self.aihabitat = {}\n self.aihabitat['default_agent'] = 0\n self.aihabitat['sensor_height'] = 1.5\n self.aihabitat['height'] = image_size[0]\n self.aihabitat['width'] = image_size[1]\n self.aihabitat['hfov'] 
= hfov\n\n # Set acoustics config for soundspaces\n self.acoustic_config = {}\n self.acoustic_config['sampleRate'] = 48000\n self.acoustic_config['direct'] = True\n self.acoustic_config['indirect'] = True\n self.acoustic_config['diffraction'] = True\n self.acoustic_config['transmission'] = True\n self.acoustic_config['directSHOrder'] = 5\n self.acoustic_config['indirectSHOrder'] = 3\n self.acoustic_config['unitScale'] = 1\n self.acoustic_config['frequencyBands'] = 32\n self.acoustic_config['indirectRayCount'] = 50000\n\n # Set audio material\n if use_default_material:\n self.audio_material = './data/material/mp3d_material_config_default.json'\n else:\n self.audio_material = './data/material/mp3d_material_config.json'\n\n # Create simulation\n self.create_scene()\n\n # Randomly set source and receiver position\n source_position, source_rotation = None, None\n receiver_position, receiver_rotation = None, None\n\n # Create receiver (inside the room)\n if self.receiver is None:\n # random receiver\n self.create_receiver(receiver_position, receiver_rotation)\n else:\n # input receiver\n self.update_receiver(self.receiver)\n\n if add_source:\n # Create source\n if self.source_list is None:\n # random source\n self.source_list = [None] * self.n_sources\n for source_id, source_name in enumerate(source_name_list):\n self.create_source(source_name, source_id, source_position, source_rotation)\n else:\n # input source\n for source_id, _ in enumerate(source_name_list):\n self.update_source(self.source_list[source_id], source_id)\n\n def create_scene(self):\n \"\"\"\n Given the configuration, create a scene for soundspaces\n \"\"\"\n\n # Set backend configuration\n backend_cfg = habitat_sim.SimulatorConfiguration()\n backend_cfg.scene_id = f'./data/scene_datasets/mp3d/{self.room}/{self.room}.glb'\n backend_cfg.scene_dataset_config_file = './data/scene_datasets/mp3d/mp3d.scene_dataset_config.json'\n backend_cfg.load_semantic_mesh = True\n backend_cfg.enable_physics = False\n\n # Set agent configuration\n agent_config = habitat_sim.AgentConfiguration()\n\n if self.include_visual_sensor:\n # Set color sensor\n rgb_sensor_spec = habitat_sim.CameraSensorSpec()\n rgb_sensor_spec.uuid = \"color_sensor\"\n rgb_sensor_spec.sensor_type = habitat_sim.SensorType.COLOR\n rgb_sensor_spec.resolution = [self.aihabitat['height'], self.aihabitat['width']]\n rgb_sensor_spec.position = [0.0, self.aihabitat[\"sensor_height\"], 0.0]\n rgb_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n rgb_sensor_spec.hfov = self.aihabitat[\"hfov\"]\n agent_config.sensor_specifications = [rgb_sensor_spec]\n\n # Set depth sensor\n depth_sensor_spec = habitat_sim.CameraSensorSpec()\n depth_sensor_spec.uuid = \"depth_sensor\"\n depth_sensor_spec.sensor_type = habitat_sim.SensorType.DEPTH\n depth_sensor_spec.resolution = [self.aihabitat[\"height\"], self.aihabitat[\"width\"]]\n depth_sensor_spec.position = [0.0, self.aihabitat[\"sensor_height\"], 0.0]\n depth_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n depth_sensor_spec.hfov = self.aihabitat[\"hfov\"]\n agent_config.sensor_specifications.append(depth_sensor_spec)\n\n # # Set semantic sensor\n # semantic_sensor_spec = habitat_sim.CameraSensorSpec()\n # semantic_sensor_spec.uuid = \"semantic_sensor\"\n # semantic_sensor_spec.sensor_type = habitat_sim.SensorType.SEMANTIC\n # semantic_sensor_spec.resolution = [self.aihabitat[\"height\"], self.aihabitat[\"width\"]]\n # semantic_sensor_spec.position = [0.0, self.aihabitat[\"sensor_height\"], 0.0]\n # 
semantic_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n # semantic_sensor_spec.hfov = self.aihabitat[\"hfov\"]\n # agent_config.sensor_specifications.append(semantic_sensor_spec)\n\n # Set simulator configuration\n cfg = habitat_sim.Configuration(backend_cfg, [agent_config])\n\n # Set simulator\n sim = habitat_sim.Simulator(cfg)\n\n # set navmesh path for searching for navigatable points\n navmesh = f'./data/scene_datasets/mp3d/{self.room}/{self.room}.navmesh'\n sim.pathfinder.load_nav_mesh(navmesh)\n\n # seed for navmesh\n sim.seed(random.randint(0, 1024))\n\n # Set simulation\n self.sim = sim\n print('Scene created!')\n\n return self\n\n import torch\n\n def add_audio_sensor(self):\n \"\"\"\n Add audio sensor to the scene\n \"\"\"\n\n # set audio sensor\n audio_sensor_spec = habitat_sim.AudioSensorSpec()\n audio_sensor_spec.uuid = \"audio_sensor\"\n audio_sensor_spec.enableMaterials = True # make sure _semantic.ply file is in the scene folder\n audio_sensor_spec.channelLayout.type = getattr(habitat_sim.sensor.RLRAudioPropagationChannelLayoutType, self.channel['type'])\n audio_sensor_spec.channelLayout.channelCount = self.channel_count # ambisonics\n\n # Set acoustic configuration\n audio_sensor_spec.acousticsConfig.sampleRate = self.acoustic_config['sampleRate']\n audio_sensor_spec.acousticsConfig.direct = self.acoustic_config['direct']\n audio_sensor_spec.acousticsConfig.indirect = self.acoustic_config['indirect']\n audio_sensor_spec.acousticsConfig.diffraction = self.acoustic_config['diffraction']\n audio_sensor_spec.acousticsConfig.transmission = self.acoustic_config['transmission']\n audio_sensor_spec.acousticsConfig.directSHOrder = self.acoustic_config['directSHOrder']\n audio_sensor_spec.acousticsConfig.indirectSHOrder = self.acoustic_config['indirectSHOrder']\n audio_sensor_spec.acousticsConfig.unitScale = self.acoustic_config['unitScale']\n audio_sensor_spec.acousticsConfig.frequencyBands = self.acoustic_config['frequencyBands']\n audio_sensor_spec.acousticsConfig.indirectRayCount = self.acoustic_config['indirectRayCount']\n # audio_sensor_spec.acousticsConfig.maxIRLength = 40.0\n # audio_sensor_spec.acousticsConfig.sourceRayCount = 2000\n # audio_sensor_spec.acousticsConfig.meshSimplification = False\n\n # Initialize receiver\n audio_sensor_spec.position = [0.0, self.aihabitat['sensor_height'], 0.0] # audio sensor has a height of 1.5m\n self.sim.add_sensor(audio_sensor_spec)\n\n audio_sensor = self.sim.get_agent(self.aihabitat['default_agent'])._sensors['audio_sensor']\n audio_sensor.setAudioMaterialsJSON(self.audio_material)\n\n return self\n\n def create_receiver(self,\n position: T.Tuple[float, float, float] = None,\n rotation: float = None\n ):\n \"\"\"\n Randomly sample receiver position and rotation\n \"\"\"\n\n if position is None:\n # Randomly set receiver position in the room\n position = self.sim.pathfinder.get_random_navigable_point()\n rotation = random.uniform(0, 360)\n\n # Set sample rate\n sample_rate = self.acoustic_config['sampleRate']\n\n # Set receiver\n receiver = Receiver(position, rotation, sample_rate)\n\n # Update receiver\n self.update_receiver(receiver)\n\n return self\n\n def update_receiver(self,\n receiver: Receiver\n ):\n \"\"\"\n Update receiver\n \"\"\"\n\n agent = self.sim.get_agent(self.aihabitat[\"default_agent\"])\n new_state = self.sim.get_agent(self.aihabitat[\"default_agent\"]).get_state()\n new_state.position = np.array(receiver.position + np.array([0, 0.0, 0])) # agent height is already applied in 
audio_sensor_spec.position\n new_state.rotation = quat_from_angle_axis(math.radians(receiver.rotation), np.array([0, 1.0, 0])) # + -> left\n # new_state.rotation *= quat_from_angle_axis(math.radians(-30), np.array([1.0, 0, 0])) # + -> up\n new_state.sensor_states = {}\n agent.set_state(new_state, True)\n\n self.receiver = receiver # for reference\n\n return self\n\n def update_receiver_position(self,\n receiver_position: T.Tuple[float, float, float]\n ):\n \"\"\"\n Update receiver position\n \"\"\"\n\n self.receiver.position = receiver_position\n\n agent = self.sim.get_agent(self.aihabitat[\"default_agent\"])\n new_state = self.sim.get_agent(self.aihabitat[\"default_agent\"]).get_state()\n new_state.position = np.array(receiver_position + np.array([0, 0.0, 0])) # agent height is already applied in audio_sensor_spec.position\n new_state.sensor_states = {}\n agent.set_state(new_state, True)\n\n return self\n\n def create_source(self,\n source_name: str,\n source_id: int,\n position: T.Tuple[float, float, float] = None,\n rotation: float = None\n ):\n \"\"\"\n Set source given the source name, position, and rotation\n \"\"\"\n\n if position is None:\n # Randomly set source position in the room\n position = self.sim.pathfinder.get_random_navigable_point()\n rotation = random.uniform(0, 360) # only for mesh as source sound is omnidirectional\n\n # Randomly set source sound\n dry_sound, mesh = sample_dry_sound_and_mesh(source_name)\n\n # Set source\n source = Source(position, rotation, dry_sound, mesh, device=self.device)\n\n # Save source\n self.update_source(source, source_id)\n\n return self\n\n def update_source(self,\n source: Source,\n source_id: int = None\n ):\n \"\"\"\n Update source\n \"\"\"\n\n if source_id is not None:\n # update source list\n self.source_list[source_id] = source\n\n # Add mesh\n if self.add_source_mesh:\n ########## Add mesh (source.position, source.rotation) ##########\n obj_templates_mgr = self.sim.get_object_template_manager()\n rigid_obj_mgr = self.sim.get_rigid_object_manager()\n\n # Load the object template from the configuration file\n obj_templates_mgr.load_configs(str(os.path.join(\"data/objects\")))\n\n # Insert the object relative to the agent\n object_ids = []\n object_orientation = mn.Quaternion.rotation(mn.Deg(source.rotation), mn.Vector3.y_axis())\n object_template_handle = obj_templates_mgr.get_template_handles(f'data/objects/{source.mesh}')[0] # debug\n if source.mesh == 'male':\n scale = 0.5\n height_offset = 0.935\n elif source.mesh == 'female':\n scale = 1.0\n height_offset = 0.85\n elif source.mesh == 'guitar':\n scale = 1 / 1239.1628 * 2\n height_offset = 1.5\n object_orientation *= mn.Quaternion.rotation(mn.Deg(-90), mn.Vector3.x_axis())\n elif source.mesh == 'drum':\n scale = 1 / 1.8\n height_offset = 0.6\n elif source.mesh == 'classic_microphone':\n scale = 1 / 1.15\n height_offset = 0.67\n elif source.mesh == 'bluetooth_speaker':\n scale = 1 / 70\n height_offset = 1.0\n\n # Scale the object to fit the scene\n scaled_object_template = obj_templates_mgr.get_template_by_handle(object_template_handle)\n scaled_object_template.scale = np.array([scale, scale, scale])\n obj_templates_mgr.register_template(scaled_object_template, \"scaled\")\n object = rigid_obj_mgr.add_object_by_template_handle(\"scaled\")\n object.translation = np.array(source.position) + np.array([0, height_offset, 0])\n object.rotation = object_orientation\n\n object_ids.append(object.object_id)\n\n # rigid_obj_mgr.remove_all_objects()\n\n else:\n # update current source\n 
audio_sensor = self.sim.get_agent(self.aihabitat['default_agent'])._sensors['audio_sensor']\n audio_sensor.setAudioSourceTransform(source.position + np.array([0, self.aihabitat[\"sensor_height\"], 0])) # add 1.5m to the height calculation\n\n self.source_current = source # for reference\n\n return self\n\n def update_source_position(self,\n source_position\n ):\n \"\"\"\n Update Source position\n \"\"\"\n\n audio_sensor = self.sim.get_agent(self.aihabitat['default_agent'])._sensors['audio_sensor']\n audio_sensor.setAudioSourceTransform(source_position + np.array([0, self.aihabitat[\"sensor_height\"], 0])) # add 1.5m to the height calculation\n\n def render_ir(self,\n source_id: int\n ) -> torch.Tensor:\n \"\"\"\n Render IR given the source ID\n \"\"\"\n\n source = self.source_list[source_id]\n self.update_source(source)\n ir = torch.tensor(self.sim.get_sensor_observations()['audio_sensor'], device=self.device)\n\n return ir\n\n def render_ir_simple(self,\n source_position: T.Tuple[float, float, float],\n receiver_position: T.Tuple[float, float, float],\n ) -> torch.Tensor:\n \"\"\"\n Render IR given the source ID\n \"\"\"\n\n # source\n self.update_source_position(source_position)\n\n # receiver\n self.update_receiver_position(receiver_position)\n\n # render ir\n ir = torch.tensor(self.sim.get_sensor_observations()['audio_sensor'], device=self.device)\n\n return ir\n\n def render_ir_all(self) -> T.List[torch.Tensor]:\n \"\"\"\n Render IR for all sources\n \"\"\"\n\n ir_list = []\n for source_id in range(self.n_sources):\n print(f'Rendering IR {source_id}/{self.n_sources}...')\n ir = self.render_ir(source_id)\n ir_list.append(ir)\n\n return ir_list\n\n def render_image(self,\n is_instance=False\n ):\n \"\"\"\n Render image including rgb, depth, and semantic\n \"\"\"\n\n observation = self.sim.get_sensor_observations()\n rgb = observation[\"color_sensor\"]\n depth = observation[\"depth_sensor\"]\n\n # Semantic\n # semantic = sim.get_sensor_observations()[\"semantic_sensor\"]\n # is_valid = (depth != 0)\n # semantic[~is_valid] = semantic.max() + 1\n\n # if is_instance:\n # # Display instance id\n # aihabitat_utils.display_sample(rgb, semantic, depth, filename=f'{dir_results}/view/view_instance.png')\n # else:\n # # Display category id\n # category = aihabitat_utils.semantic_id_to_category_id(semantic, sim.semantic_scene.objects)\n # void_id = 0\n # category[~is_valid] = void_id\n # aihabitat_utils.display_sample(rgb, category, depth, filename=f'{dir_results}/view/view_category.png')\n\n return rgb, depth\n\n def render_envmap(self):\n \"\"\"\n Render environment map in *** format\n \"\"\"\n\n with suppress_stdout_and_stderr():\n angles = [0, 270, 180, 90]\n rgb_panorama = []\n depth_panorama = []\n\n for angle_offset in angles:\n angle = self.receiver.rotation + angle_offset\n agent = self.sim.get_agent(self.aihabitat[\"default_agent\"])\n new_state = self.sim.get_agent(self.aihabitat[\"default_agent\"]).get_state()\n new_state.rotation = quat_from_angle_axis(\n math.radians(angle), np.array([0, 1.0, 0])\n ) * quat_from_angle_axis(math.radians(0), np.array([1.0, 0, 0]))\n new_state.sensor_states = {}\n agent.set_state(new_state, True)\n\n observation = self.sim.get_sensor_observations()\n rgb_panorama.append(observation[\"color_sensor\"])\n depth_panorama.append((observation['depth_sensor']))\n envmap_rgb = np.concatenate(rgb_panorama, axis=1)\n envmap_depth = np.concatenate(depth_panorama, axis=1)\n\n # rotate receiver to original angle\n self.update_receiver(self.receiver)\n\n return 
envmap_rgb, envmap_depth\n\n def generate_xy_grid_points(self,\n grid_distance: float,\n height: float = None,\n filename_png: str = None,\n meters_per_pixel: float = 0.005\n ) -> torch.Tensor:\n \"\"\"\n Generate the 3D positions of grid points at the given height\n \"\"\"\n\n pathfinder = self.sim.pathfinder\n assert pathfinder.is_loaded\n # agent_height = pathfinder.nav_mesh_settings.agent_height # to be navigable, full body of the agent should be inside\n if height is None: # height of the agent foot\n height = 0\n # height = pathfinder.get_bounds()[0][1] # floor height\n\n # Sample grid\n bounds = pathfinder.get_bounds()\n x_points = torch.arange(bounds[0][0], bounds[1][0] + grid_distance, grid_distance)\n z_points = torch.arange(bounds[0][2], bounds[1][2] + grid_distance, grid_distance)\n x_grid, z_grid = torch.meshgrid(x_points, z_points)\n y_value = height * torch.ones_like(x_grid.reshape(-1))\n\n # Combine x, y, and z coordinates into a single tensor of points\n points = torch.stack([x_grid.reshape(-1), y_value.reshape(-1), z_grid.reshape(-1)], dim=-1)\n is_points_navigable = []\n for point in points:\n is_points_navigable.append(pathfinder.is_navigable(point)) # navigable points\n torch.tensor(is_points_navigable).sum()\n\n # Flatten the tensor of points into a list\n grid_points = points[is_points_navigable]\n\n # assert len(grid_points) > 0\n # save image\n if filename_png is not None:\n aihabitat_utils.save_town_map_grid(filename_png, pathfinder, grid_points, meters_per_pixel=meters_per_pixel)\n\n return grid_points\n\n def generate_data(self, use_dry_sound: bool = False):\n \"\"\"\n Generate all data including IR, envmap, audio, image\n \"\"\"\n\n # env map\n if self.include_visual_sensor:\n envmap_rgb, envmap_depth = self.render_image()\n else:\n envmap_rgb, envmap_depth = None, None\n\n # IR\n self.add_audio_sensor() # add audio_sensor after image rendering for faster image rendering\n ir_list = self.render_ir_all()\n # ir_total = sum_arrays_with_different_length(ir_list).detach().cpu()\n\n # audio_list\n dry_sound_list = []\n audio_list = []\n # audio_total = None\n if use_dry_sound:\n for source_id, source in enumerate(self.source_list):\n # load dry sound\n dry_sound = source.dry_sound\n if isinstance(dry_sound, str):\n dry_sound, sample_rate = torchaudio.load(dry_sound)\n self.dry_sound = dry_sound.to(self.device)\n self.sample_rate = sample_rate\n\n ir = ir_list[source_id]\n audio = torch.stack([audio_utils.fft_conv(dry_sound[0], ir_channel, is_cpu=True) for ir_channel in ir])\n dry_sound_list.append(dry_sound.detach().cpu())\n audio_list.append(audio.detach().cpu())\n\n # audio_total\n # audio_total = sum_arrays_with_different_length(audio_list)\n\n # cpu\n ir_list = [tensor.detach().cpu() for tensor in ir_list]\n\n # dirname = '.'\n # with open(f'{dirname}/debug.txt', 'w') as f:\n # f.write(f'NavMesh area: {self.sim.pathfinder.navigable_area}\\n')\n # f.write(f'NavMesh bounds: {self.sim.pathfinder.get_bounds()}\\n')\n # f.write(f'Receiver position: {self.receiver.position}\\n')\n # for s, source in enumerate(self.source_list):\n # f.write(f'Source {s} position: {source.position}\\n')\n # f.write(f'\\n')\n\n return dict(\n ir_list=ir_list,\n sample_rate=self.receiver.sample_rate,\n envmap=[envmap_rgb, envmap_depth],\n audio_list=audio_list,\n dry_sound_list=dry_sound_list,\n )" } ]
import os import json import argparse import itertools import subprocess import typing as T import torch import imageio import torchaudio import numpy as np import matplotlib.pyplot as plt from moviepy.editor import * from nvas3d.utils.dynamic_utils import convolve_moving_receiver, setup_dynamic_interp from nvas3d.utils.audio_utils import clip_two, clip_all from soundspaces_nvas3d.utils.ss_utils import create_scene, render_rir_parallel from soundspaces_nvas3d.utils.aihabitat_utils import load_room_grid from soundspaces_nvas3d.soundspaces_nvas3d import Receiver, Source, Scene
10957
""" # Set source and receiver points source_point_list = grid_points_source[source_idx_list] receiver_point_list = grid_points_receiver[receiver_idx_list] source_points_pair, receiver_points_pair = all_pairs(source_point_list, receiver_point_list) _, receiver_rotation_pair = all_pairs(source_point_list, receiver_rotation_list) room_list = [room] * len(source_points_pair) filename_list = None # Render RIR for grid points ir_list = render_rir_parallel(room_list, source_points_pair, receiver_points_pair, receiver_rotation_list=receiver_rotation_pair, filename_list=filename_list, channel_type=channel_type, channel_order=channel_order) ir_list = clip_all(ir_list) # make the length consistent num_channel = len(ir_list[0]) # Reshape RIR num_sources = len(source_idx_list) num_receivers = len(receiver_idx_list) ir_output = torch.stack(ir_list).reshape(num_sources, num_receivers, num_channel, -1) # '-1' will infer the remaining dimension based on the size of each tensor in ir_list ir_output /= ir_output.abs().max() return ir_output def interpolate_values( start: float, end: float, interp_weight: float ) -> float: """ Interpolate between two values based on the weight values. Args: - start: Beginning value. - end: Ending value. - interp_weight: Weight for linear interpolation Returns: - Interpolated value. """ return (1 - interp_weight) * start + interp_weight * end def main(args): """ Generate NVAS video from the estimated dry sound. Save: ├── {results_demo} = results/nvas3d_demo/default/demo/{room}/0 │ ├── video/ │ │ ├── moving_audio.wav : Audio interpolated for the moving receiver. │ │ ├── moving_audio_1.wav : Audio interpolated specifically for source 1. │ │ ├── moving_audio_2.wav : Audio interpolated specifically for source 2. │ │ ├── moving_video.mp4 : Video visualization of movement (no audio). │ │ ├── nvas.mp4 : NVAS video results with combined audio. │ │ ├── nvas_source1.mp4 : NVAS video results for only source 1 audio. │ │ ├── nvas_source2.mp4 : NVAS video results for only source 2 audio. │ │ └── rgb_receiver.png : A rendered view from the perspective of the receiver. 
""" # Constants sample_rate = args.sample_rate sample_rate_video = args.sample_rate_video novel_path_config = args.novel_path_config use_gt_location = args.use_gt_location channel_type = args.channel_type use_placeholder_mesh = args.use_placeholder_mesh # Load data and metadata metadata = torch.load(f'{args.results_dir}/results_detection/metadata.pt') room = metadata['room'][0] grid_points_source = metadata['grid_points'][0] receiver_idx_list_original = torch.tensor(metadata['receiver_idx_list'])[:4] if use_gt_location: # Use estimated dry sound from GT source location source1_idx = metadata['source1_idx'][0].item() source2_idx = metadata['source2_idx'][0].item() source_idx_list = [source1_idx, source2_idx] else: # Use estimated dry sound from detected source location detected_source1_idx = metadata['detected_source_idx'][0] detected_source2_idx = metadata['detected_source_idx'][1] source_idx_list = [detected_source1_idx, detected_source2_idx] # Define receiver path and rotations with open(f'demo/config_demo/{novel_path_config}.json', 'r') as file: json_path = json.load(file) receiver_idx_list = json_path['receiver_idx_list'] receiver_rotation_list = json_path['receiver_rotation_list'] # Load grid points grid_points_receiver = load_room_grid(room, grid_distance=args.grid_distance)['grid_points'] # Generate RIRs output_dir = f'{args.results_dir}/video_{channel_type}' os.makedirs(output_dir, exist_ok=True) ir_save_dir = f'{output_dir}/ir_save_{novel_path_config}_{channel_type}.pt' if os.path.exists(ir_save_dir): ir_output = torch.load(ir_save_dir) else: ir_output = generate_rir_combination( room, source_idx_list, grid_points_source, receiver_idx_list, receiver_rotation_list, grid_points_receiver, channel_type ) torch.save(ir_output, ir_save_dir) ir1_list, ir2_list = ir_output # Prepare source audio if use_gt_location: source1_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/dry1_estimated.wav') source2_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/dry2_estimated.wav') else: source1_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/detected/dry_{source_idx_list[0]}.wav') source2_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/detected/dry_{source_idx_list[1]}.wav')
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # def normalize(input: torch.Tensor) -> torch.Tensor: output = (input - input.min()) / (input.max() - input.min()) output = 2 * output - 1 return output def configure_scene_from_metadata( metadata: T.Dict[str, T.Any], image_size: T.Tuple[int, int] = (1000, 1000), hfov: float = 90.0, use_placeholder_mesh: bool = False ) -> Scene: """ Configures a scene using the provided metadata. Args: - metadata: Dictionary containing room and grid point information. - image_size: The size of the rendered image. - hfov: Horizontal field of view. - use_placeholder_mesh: Flag to determine if placeholder meshes should be used. Returns: - Configured scene object. """ room = metadata['room'][0] grid_points_source = metadata['grid_points'][0] source_idx_list = [metadata['source1_idx'][0].item(), metadata['source2_idx'][0].item()] receiver_idx_list_original = torch.tensor(metadata['receiver_idx_list'])[:4] scene = create_scene(room, image_size=image_size, hfov=hfov) if use_placeholder_mesh: # Add placeholder mesh for sources and receivers to the scene # Download the following mesh objects and locate it under data/objects/{mesh_name}.glb: # - "Bluetooth Speaker" (https://skfb.ly/6VLyL) by Ramanan is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/). # - “Classic Microphone” (https://skfb.ly/6Aryq) by urbanmasque is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/) # - "Standard Drum Set" (https://skfb.ly/owroB) by Heataker is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/). # - "3D Posed People" (https://renderpeople.com/free-3d-people/) by Renderpeople: The licensing for our Renderpeople products includes that customers are allowed to use the data for rendering still images and animations for commercial or private purposes, such as video production, broadcasting, print, movies, advertising, illustrations and presentations (https://renderpeople.com/faq/) ss_source1 = Source( position=grid_points_source[source_idx_list[0]], rotation=0, dry_sound='', mesh='bluetooth_speaker', # Need mesh object device=torch.device('cpu') ) ss_source2 = Source( position=grid_points_source[source_idx_list[1]], rotation=-90, dry_sound='', mesh='bluetooth_speaker', # Need mesh object device=torch.device('cpu') ) ss_mic_list = [ Source( position=grid_points_source[idx], rotation=180, dry_sound='', mesh='classic_microphone', # Need mesh object device=torch.device('cpu') ) for idx in receiver_idx_list_original ] scene.add_source_mesh = True scene.source_list = [None] * (len(source_idx_list) + len(receiver_idx_list_original)) scene.update_source(ss_source1, 0) scene.update_source(ss_source2, 1) for m, mic in enumerate(ss_mic_list): scene.update_source(mic, m + 2) return scene def interpolate_moving_audio( source1_audio: torch.Tensor, source2_audio: torch.Tensor, ir1_list: T.List[torch.Tensor], ir2_list: T.List[torch.Tensor], receiver_position: torch.Tensor ) -> T.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Interpolates audio for a moving receiver. Args: - source1_audio: First source audio. - source2_audio: Second source audio. - ir1_list: List of impulse responses for source 1. - ir2_list: List of impulse responses for source 2. - receiver_position: Positions of the moving receiver. Returns: - Tuple containing combined audio, interpolated audio from source 1, and interpolated audio from source 2. 
""" # Prepare for interpolation audio_len = source1_audio.shape[-1] interp_index, interp_weight = setup_dynamic_interp(receiver_position.numpy(), audio_len) # Generate audio for moving receiver receiver_audio_1 = convolve_moving_receiver(source1_audio.numpy()[0], ir1_list.numpy(), interp_index, interp_weight) receiver_audio_2 = convolve_moving_receiver(source2_audio.numpy()[0], ir2_list.numpy(), interp_index, interp_weight) receiver_audio_1 = receiver_audio_1[..., :source1_audio.shape[-1]] receiver_audio_2 = receiver_audio_2[..., :source1_audio.shape[-1]] # Mix and normalize audios receiver_audio = (receiver_audio_1 + receiver_audio_2) scale = np.max(abs(receiver_audio)) receiver_audio /= scale receiver_audio_1 /= scale receiver_audio_2 /= scale return torch.from_numpy(receiver_audio), torch.from_numpy(receiver_audio_1), torch.from_numpy(receiver_audio_2) def interpolate_rgb_images( scene: Scene, receiver_position: torch.Tensor, receiver_rotation_list: T.List[float], video_len: int ) -> T.List[np.ndarray]: """ Interpolates RGB images based on receiver movement and rotation. Args: - scene: Scene object to render the images from. - receiver_position: Positions of the receiver along the path. - receiver_rotation_list: List of rotations for the receiver. - video_len: Number of frames in the video. Returns: - List of interpolated RGB images. """ interp_index, interp_weight = setup_dynamic_interp(receiver_position.numpy(), video_len) interpolated_rgb_list = [] for t in range(len(interp_index)): # Find the positions and rotations between which we're interpolating start_idx = interp_index[t] end_idx = start_idx + 1 start_pos = receiver_position[start_idx] end_pos = receiver_position[end_idx] start_rot = receiver_rotation_list[start_idx] end_rot = receiver_rotation_list[end_idx] # Interpolate position and rotation receiver_position_interp = interpolate_values(start_pos, end_pos, interp_weight[t]) receiver_rotation_interp = interpolate_values(start_rot, end_rot, interp_weight[t]) receiver = Receiver(receiver_position_interp, receiver_rotation_interp) scene.update_receiver(receiver) rgb, _ = scene.render_image() interpolated_rgb_list.append(rgb[..., :3]) return interpolated_rgb_list def all_pairs( list1: T.List[T.Any], list2: T.List[T.Any] ) -> T.Tuple[T.List[T.Any], T.List[T.Any]]: """ Computes all pairs of combinations between two lists. Args: - list1: First list. - list2: Second list. Returns: - Two lists containing paired elements from list1 and list2. """ list_pair = list(itertools.product(list1, list2)) list1_pair, list2_pair = zip(*list_pair) list1_pair = list(list1_pair) list2_pair = list(list2_pair) return list1_pair, list2_pair def generate_rir_combination( room: str, source_idx_list: T.List[int], grid_points_source: torch.Tensor, receiver_idx_list: T.List[int], receiver_rotation_list: T.List[float], grid_points_receiver: torch.Tensor, channel_type: str = 'Binaural', channel_order: int = 0 ) -> T.List[T.List[torch.Tensor]]: """ Generates room impulse responses (RIR) for given source and receiver combinations. Args: - room: Room object for which RIRs need to be computed. - source_idx_list: List of source indices. - grid_points_source: Grid points for the source. - receiver_idx_list: List of receiver indices. - receiver_rotation_list: List of receiver rotations. - grid_points_receiver: Grid points for the receiver. - channel_type: Type of the channel. Defaults to 'Ambisonics'. - channel_order: Order of the channel for Ambisonics. Defulats to 0, as video usually does not support HOA. 
Returns: - A 2D list containing RIRs for every source-receiver combination. """ # Set source and receiver points source_point_list = grid_points_source[source_idx_list] receiver_point_list = grid_points_receiver[receiver_idx_list] source_points_pair, receiver_points_pair = all_pairs(source_point_list, receiver_point_list) _, receiver_rotation_pair = all_pairs(source_point_list, receiver_rotation_list) room_list = [room] * len(source_points_pair) filename_list = None # Render RIR for grid points ir_list = render_rir_parallel(room_list, source_points_pair, receiver_points_pair, receiver_rotation_list=receiver_rotation_pair, filename_list=filename_list, channel_type=channel_type, channel_order=channel_order) ir_list = clip_all(ir_list) # make the length consistent num_channel = len(ir_list[0]) # Reshape RIR num_sources = len(source_idx_list) num_receivers = len(receiver_idx_list) ir_output = torch.stack(ir_list).reshape(num_sources, num_receivers, num_channel, -1) # '-1' will infer the remaining dimension based on the size of each tensor in ir_list ir_output /= ir_output.abs().max() return ir_output def interpolate_values( start: float, end: float, interp_weight: float ) -> float: """ Interpolate between two values based on the weight values. Args: - start: Beginning value. - end: Ending value. - interp_weight: Weight for linear interpolation Returns: - Interpolated value. """ return (1 - interp_weight) * start + interp_weight * end def main(args): """ Generate NVAS video from the estimated dry sound. Save: ├── {results_demo} = results/nvas3d_demo/default/demo/{room}/0 │ ├── video/ │ │ ├── moving_audio.wav : Audio interpolated for the moving receiver. │ │ ├── moving_audio_1.wav : Audio interpolated specifically for source 1. │ │ ├── moving_audio_2.wav : Audio interpolated specifically for source 2. │ │ ├── moving_video.mp4 : Video visualization of movement (no audio). │ │ ├── nvas.mp4 : NVAS video results with combined audio. │ │ ├── nvas_source1.mp4 : NVAS video results for only source 1 audio. │ │ ├── nvas_source2.mp4 : NVAS video results for only source 2 audio. │ │ └── rgb_receiver.png : A rendered view from the perspective of the receiver. 
""" # Constants sample_rate = args.sample_rate sample_rate_video = args.sample_rate_video novel_path_config = args.novel_path_config use_gt_location = args.use_gt_location channel_type = args.channel_type use_placeholder_mesh = args.use_placeholder_mesh # Load data and metadata metadata = torch.load(f'{args.results_dir}/results_detection/metadata.pt') room = metadata['room'][0] grid_points_source = metadata['grid_points'][0] receiver_idx_list_original = torch.tensor(metadata['receiver_idx_list'])[:4] if use_gt_location: # Use estimated dry sound from GT source location source1_idx = metadata['source1_idx'][0].item() source2_idx = metadata['source2_idx'][0].item() source_idx_list = [source1_idx, source2_idx] else: # Use estimated dry sound from detected source location detected_source1_idx = metadata['detected_source_idx'][0] detected_source2_idx = metadata['detected_source_idx'][1] source_idx_list = [detected_source1_idx, detected_source2_idx] # Define receiver path and rotations with open(f'demo/config_demo/{novel_path_config}.json', 'r') as file: json_path = json.load(file) receiver_idx_list = json_path['receiver_idx_list'] receiver_rotation_list = json_path['receiver_rotation_list'] # Load grid points grid_points_receiver = load_room_grid(room, grid_distance=args.grid_distance)['grid_points'] # Generate RIRs output_dir = f'{args.results_dir}/video_{channel_type}' os.makedirs(output_dir, exist_ok=True) ir_save_dir = f'{output_dir}/ir_save_{novel_path_config}_{channel_type}.pt' if os.path.exists(ir_save_dir): ir_output = torch.load(ir_save_dir) else: ir_output = generate_rir_combination( room, source_idx_list, grid_points_source, receiver_idx_list, receiver_rotation_list, grid_points_receiver, channel_type ) torch.save(ir_output, ir_save_dir) ir1_list, ir2_list = ir_output # Prepare source audio if use_gt_location: source1_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/dry1_estimated.wav') source2_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/dry2_estimated.wav') else: source1_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/detected/dry_{source_idx_list[0]}.wav') source2_audio, _ = torchaudio.load(f'{args.results_dir}/results_drysound/detected/dry_{source_idx_list[1]}.wav')
source1_audio, source2_audio = clip_two(source1_audio, source2_audio)
2
2023-10-19 05:35:54+00:00
16k
openvpi/SingingVocoders
training/nsf_HiFigan_chroma_task.py
[ { "identifier": "Generator", "path": "models/nsf_HiFigan_chroma/models.py", "snippet": "class Generator(torch.nn.Module):\n def __init__(self, h):\n super(Generator, self).__init__()\n self.h = h\n self.num_kernels = len(h.resblock_kernel_sizes)\n self.num_upsamples = len(h.upsample_rates)\n self.m_source = SourceModuleHnNSF(\n sampling_rate=h.sampling_rate,\n harmonic_num=8,\n use_chroma=True,\n )\n self.noise_convs = nn.ModuleList()\n self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))\n resblock = ResBlock1 if h.resblock == '1' else ResBlock2\n\n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):\n c_cur = h.upsample_initial_channel // (2 ** (i + 1))\n self.ups.append(weight_norm(\n ConvTranspose1d(h.upsample_initial_channel // (2 ** i), h.upsample_initial_channel // (2 ** (i + 1)),\n k, u, padding=(k - u) // 2)))\n if i + 1 < len(h.upsample_rates): #\n stride_f0 = int(np.prod(h.upsample_rates[i + 1:]))\n self.noise_convs.append(Conv1d(\n 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2))\n else:\n self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))\n self.resblocks = nn.ModuleList()\n ch = h.upsample_initial_channel\n for i in range(len(self.ups)):\n ch //= 2\n for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):\n self.resblocks.append(resblock(h, ch, k, d))\n\n self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))\n self.ups.apply(init_weights)\n self.conv_post.apply(init_weights)\n self.upp = int(np.prod(h.upsample_rates))\n\n def forward(self, x, f0):\n har_source = self.m_source(f0, self.upp).transpose(1, 2)\n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = self.ups[i](x)\n x_source = self.noise_convs[i](har_source)\n x = x + x_source\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i * self.num_kernels + j](x)\n else:\n xs += self.resblocks[i * self.num_kernels + j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n x = torch.tanh(x)\n\n return x\n\n def remove_weight_norm(self):\n # rank_zero_info('Removing weight norm...')\n print('Removing weight norm...')\n for l in self.ups:\n remove_weight_norm(l)\n for l in self.resblocks:\n l.remove_weight_norm()\n remove_weight_norm(self.conv_pre)\n remove_weight_norm(self.conv_post)" }, { "identifier": "AttrDict", "path": "models/nsf_HiFigan_chroma/models.py", "snippet": "class AttrDict(dict):\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self" }, { "identifier": "MultiScaleDiscriminator", "path": "models/nsf_HiFigan_chroma/models.py", "snippet": "class MultiScaleDiscriminator(torch.nn.Module):\n def __init__(self):\n super(MultiScaleDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList(\n [\n DiscriminatorS(use_spectral_norm=True),\n DiscriminatorS(),\n DiscriminatorS(),\n ]\n )\n self.meanpools = nn.ModuleList(\n [AvgPool1d(4, 2, padding=2), AvgPool1d(4, 2, padding=2)]\n )\n\n def forward(self, y):\n y_d_rs = []\n\n fmap_rs = []\n\n for i, d in enumerate(self.discriminators):\n if i != 0:\n y = self.meanpools[i - 1](y)\n\n y_d_r, fmap_r = d(y)\n\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n\n\n return y_d_rs, fmap_rs," }, { "identifier": "MultiPeriodDiscriminator", "path": "models/nsf_HiFigan_chroma/models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n 
def __init__(self, periods=None):\n super(MultiPeriodDiscriminator, self).__init__()\n self.periods = periods if periods is not None else [2, 3, 5, 7, 11]\n self.discriminators = nn.ModuleList()\n for period in self.periods:\n self.discriminators.append(DiscriminatorP(period))\n\n def forward(self, y):\n y_d_rs = []\n\n fmap_rs = []\n\n\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n\n\n return y_d_rs, fmap_rs," }, { "identifier": "HiFiloss", "path": "modules/loss/HiFiloss.py", "snippet": "class HiFiloss(nn.Module):\n def __init__(self, config: dict):\n super().__init__()\n self.mel = PitchAdjustableMelSpectrogram(sample_rate=config['audio_sample_rate'],\n n_fft=config['fft_size'],\n win_length=config['win_size'],\n hop_length=config['hop_size'],\n f_min=config['fmin'],\n f_max=config['fmax_for_loss'],\n n_mels=config['audio_num_mel_bins'], )\n self.L1loss = nn.L1Loss()\n self.lab_aux_loss = config.get('lab_aux_loss', 45)\n self.lab_aux_mel_loss = config.get('lab_aux_melloss', self.lab_aux_loss)\n self.lab_aux_stft_loss = config.get('lab_aux_stftloss', 2.5)\n if config.get('use_stftloss', False):\n self.stft = warp_stft({'fft_sizes': config['loss_fft_sizes'], 'hop_sizes': config['loss_hop_sizes'],\n 'win_lengths': config['loss_win_lengths']})\n self.use_stftloss = config.get('use_stftloss', False)\n\n def discriminator_loss(self, disc_real_outputs, disc_generated_outputs):\n loss = 0\n rlosses = 0\n glosses = 0\n r_losses = []\n g_losses = []\n\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg ** 2)\n loss += r_loss + g_loss\n rlosses += r_loss.item()\n glosses += g_loss.item()\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, rlosses, glosses, r_losses, g_losses\n\n def Dloss(self, Dfake, Dtrue):\n\n (Fmsd_out, _), (Fmpd_out, _) = Dfake\n (Tmsd_out, _), (Tmpd_out, _) = Dtrue\n msdloss, msdrlosses, msdglosses, _, _ = self.discriminator_loss(Tmsd_out, Fmsd_out)\n mpdloss, mpdrlosses, mpdglosses, _, _ = self.discriminator_loss(Tmpd_out, Fmpd_out)\n loss = msdloss + mpdloss\n return loss, {'DmsdlossF': msdglosses, 'DmsdlossT': msdrlosses, 'DmpdlossT': mpdrlosses,\n 'DmpdlossF': mpdglosses}\n\n def feature_loss(self, fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2\n\n def GDloss(self, GDfake, GDtrue):\n loss = 0\n gen_losses = []\n msd_losses = 0\n mpd_losses = 0\n (msd_out, Fmsd_feature), (mpd_out, Fmpd_feature) = GDfake\n (_, Tmsd_feature), (_, Tmpd_feature) = GDtrue\n for dg in msd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n msd_losses = l + msd_losses\n\n for dg in mpd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n mpd_losses = l + mpd_losses\n\n msd_feature_loss = self.feature_loss(Tmsd_feature, Fmsd_feature)\n mpd_feature_loss = self.feature_loss(Tmpd_feature, Fmpd_feature)\n # loss +=msd_feature_loss\n # loss +=mpd_feature_loss\n loss = msd_feature_loss + mpd_feature_loss + mpd_losses + msd_losses\n # (msd_losses, mpd_losses), (msd_feature_loss, mpd_feature_loss), gen_losses\n return loss, {'Gmsdloss': msd_losses, 'Gmpdloss': mpd_losses, 'Gmsd_feature_loss': msd_feature_loss,\n 'Gmpd_feature_loss': mpd_feature_loss}\n\n def Auxloss(self, Goutput, sample):\n Gmel = 
self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n mel_loss = self.L1loss(Gmel, Rmel) * self.lab_aux_mel_loss\n if self.use_stftloss:\n sc_loss, mag_loss = self.stft.stft(Goutput['audio'].squeeze(1), sample['audio'].squeeze(1))\n stft_loss = (sc_loss + mag_loss) * self.lab_aux_stft_loss\n loss = mel_loss + stft_loss\n return loss, {'auxloss': loss, 'auxloss_mel': mel_loss, 'auxloss_stft': stft_loss}\n return mel_loss, {'auxloss': mel_loss}\n\n # def Auxloss(self,Goutput, sample):\n #\n # Gmel=self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n # # Rmel=sample['mel']\n # Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n # sc_loss, mag_loss=self.stft.stft(Goutput['audio'].squeeze(1), sample['audio'].squeeze(1))\n # loss=(sc_loss+ mag_loss)*self.labauxloss\n # return loss,{'auxloss':loss,'auxloss_sc_loss':sc_loss,'auxloss_mag_loss':mag_loss}\n #" }, { "identifier": "GanBaseTask", "path": "training/base_task_gan.py", "snippet": "class GanBaseTask(pl.LightningModule):\n \"\"\"\n Base class for training tasks.\n 1. *load_ckpt*:\n load checkpoint;\n 2. *training_step*:\n record and log the loss;\n 3. *optimizer_step*:\n run backwards step;\n 4. *start*:\n load training configs, backup code, log to tensorboard, start training;\n 5. *configure_ddp* and *init_ddp_connection*:\n start parallel training.\n\n Subclasses should define:\n 1. *build_model*, *build_optimizer*, *build_scheduler*:\n how to build the model, the optimizer and the training scheduler;\n 2. *_training_step*:\n one training step of the model;\n 3. *on_validation_end* and *_on_validation_end*:\n postprocess the validation output.\n \"\"\"\n\n def __init__(self, config: dict, *args, **kwargs):\n # dataset configs\n super().__init__(*args, **kwargs)\n self.dataset_cls = None\n self.config = config\n # self.max_batch_frames = self.config['max_batch_frames']\n # self.max_batch_size = self.config['max_batch_size']\n # self.max_val_batch_frames = self.config['max_val_batch_frames']\n # self.max_val_batch_size = self.config['max_val_batch_size']\n\n # self.accumulate_grad_batches = self.config['accumulate_grad_batches']\n self.clip_grad_norm = self.config['clip_grad_norm']\n\n self.training_sampler = None\n self.model = None\n self.generator = None\n self.discriminator = None\n self.skip_immediate_validation = False\n self.skip_immediate_ckpt_save = False\n\n self.valid_losses: Dict[str, Metric] = {\n 'total_loss': MeanMetric()\n }\n self.valid_metric_names = set()\n self.mix_loss = None\n\n self.automatic_optimization = False\n self.skip_immediate_validations = 0\n\n self.aux_step = self.config.get('aux_step')\n self.train_dataset = None\n self.valid_dataset = None\n\n ###########\n\n # Training, validation and testing\n ###########\n def setup(self, stage):\n self.model = self.build_model()\n self.unfreeze_all_params()\n if self.config['freezing_enabled']:\n self.freeze_params()\n if self.config['finetune_enabled'] and get_latest_checkpoint_path(\n pathlib.Path(self.config['work_dir'])) is None:\n self.load_finetune_ckpt(self.load_pre_train_model())\n self.print_arch()\n self.build_losses_and_metrics()\n self.build_dataset()\n # self.train_dataset = self.dataset_cls(\n # config=self.config, data_dir=self.config['binary_data_dir'],\n # prefix=self.config['train_set_name'], allow_aug=True\n # )\n # self.valid_dataset = self.dataset_cls(\n # 
config=self.config, data_dir=self.config['binary_data_dir'],\n # prefix=self.config['valid_set_name'], allow_aug=False\n # )\n\n def build_dataset(self):\n raise NotImplementedError()\n\n def get_need_freeze_state_dict_key(self, model_state_dict) -> list:\n key_list = []\n for i in self.config['frozen_params']:\n for j in model_state_dict:\n if j.startswith(i):\n key_list.append(j)\n return list(set(key_list))\n\n def freeze_params(self) -> None:\n model_state_dict = self.state_dict().keys()\n freeze_key = self.get_need_freeze_state_dict_key(model_state_dict=model_state_dict)\n\n for i in freeze_key:\n params = self.get_parameter(i)\n\n params.requires_grad = False\n\n def unfreeze_all_params(self) -> None:\n for i in self.parameters():\n i.requires_grad = True\n\n def load_finetune_ckpt(\n self, state_dict\n ) -> None:\n\n adapt_shapes = self.config['finetune_strict_shapes']\n if not adapt_shapes:\n cur_model_state_dict = self.state_dict()\n unmatched_keys = []\n for key, param in state_dict.items():\n if key in cur_model_state_dict:\n new_param = cur_model_state_dict[key]\n if new_param.shape != param.shape:\n unmatched_keys.append(key)\n print('| Unmatched keys: ', key, new_param.shape, param.shape)\n for key in unmatched_keys:\n del state_dict[key]\n self.load_state_dict(state_dict, strict=False)\n\n def load_pre_train_model(self):\n\n pre_train_ckpt_path = self.config.get('finetune_ckpt_path')\n blacklist = self.config.get('finetune_ignored_params')\n if blacklist is None:\n blacklist = []\n # if whitelist is None:\n # raise RuntimeError(\"\")\n\n if pre_train_ckpt_path is not None:\n ckpt = torch.load(pre_train_ckpt_path)\n\n state_dict = {}\n for i in ckpt['state_dict']:\n # if 'diffusion' in i:\n # if i in rrrr:\n # continue\n skip = False\n for b in blacklist:\n if i.startswith(b):\n skip = True\n break\n\n if skip:\n continue\n\n state_dict[i] = ckpt['state_dict'][i]\n print(i)\n return state_dict\n else:\n raise RuntimeError(\"\")\n\n def build_model(self):\n raise NotImplementedError()\n\n @rank_zero_only\n def print_arch(self):\n utils.print_arch(self)\n\n def build_losses_and_metrics(self):\n raise NotImplementedError()\n\n def register_metric(self, name: str, metric: Metric):\n assert isinstance(metric, Metric)\n setattr(self, name, metric)\n self.valid_metric_names.add(name)\n\n # def run_model(self, sample, infer=False):\n # \"\"\"\n # steps:\n # 1. run the full model\n # 2. calculate losses if not infer\n # \"\"\"\n # raise NotImplementedError()\n\n def Gforward(self, sample, infer=False):\n \"\"\"\n steps:\n 1. run the full model\n 2. calculate losses if not infer\n \"\"\"\n raise NotImplementedError()\n\n def Dforward(self, Goutput):\n \"\"\"\n steps:\n 1. run the full model\n 2. 
calculate losses if not infer\n \"\"\"\n raise NotImplementedError()\n\n # def on_train_epoch_start(self):\n # if self.training_sampler is not None:\n # self.training_sampler.set_epoch(self.current_epoch)\n\n def _training_step(self, sample, batch_idx):\n \"\"\"\n :return: total loss: torch.Tensor, loss_log: dict, other_log: dict\n\n \"\"\"\n aux_only = False\n if self.aux_step is not None:\n if self.aux_step > self.global_step:\n aux_only = True\n\n log_diet = {}\n opt_g, opt_d = self.optimizers()\n Goutput = self.Gforward(sample=sample)\n if not aux_only:\n Dfake = self.Dforward(Goutput=Goutput['audio'].detach())\n Dtrue = self.Dforward(Goutput=sample['audio'])\n Dloss, Dlog = self.mix_loss.Dloss(Dfake=Dfake, Dtrue=Dtrue)\n log_diet.update(Dlog)\n # if self.clip_grad_norm is not None:\n # self.manual_backward(Dloss/self.clip_grad_norm)\n # else:\n opt_d.zero_grad()\n self.manual_backward(Dloss)\n if self.clip_grad_norm is not None:\n self.clip_gradients(opt_d, gradient_clip_val=self.clip_grad_norm, gradient_clip_algorithm=\"norm\")\n opt_d.step()\n opt_d.zero_grad()\n if not aux_only:\n GDfake = self.Dforward(Goutput=Goutput['audio'])\n GDtrue = self.Dforward(Goutput=sample['audio'])\n GDloss, GDlog = self.mix_loss.GDloss(GDfake=GDfake,GDtrue=GDtrue)\n log_diet.update(GDlog)\n Auxloss, Auxlog = self.mix_loss.Auxloss(Goutput=Goutput, sample=sample)\n\n log_diet.update(Auxlog)\n if not aux_only:\n Gloss=GDloss + Auxloss\n else:\n Gloss=Auxloss\n\n # if self.clip_grad_norm is not None:\n # self.manual_backward(Gloss / self.clip_grad_norm)\n # else:\n # self.manual_backward(Gloss)\n # if (batch_idx + 1) % self.accumulate_grad_batches == 0:\n opt_g.zero_grad()\n self.manual_backward(Gloss)\n if self.clip_grad_norm is not None:\n self.clip_gradients(opt_g, gradient_clip_val=self.clip_grad_norm, gradient_clip_algorithm=\"norm\")\n opt_g.step()\n\n\n\n return log_diet\n\n def training_step(self, sample, batch_idx, ): # todo\n log_outputs = self._training_step(sample, batch_idx)\n\n # logs to progress bar\n self.log_dict({'loss':sum(log_outputs.values())}, prog_bar=True, logger=False, on_step=True, on_epoch=False)\n # self.log('lr', self.lr_schedulers().get_last_lr()[0], prog_bar=True, logger=False, on_step=True, on_epoch=False)\n # logs to tensorboard\n if self.global_step % self.config['log_interval'] == 0:\n tb_log = {f'training/{k}': v for k, v in log_outputs.items()}\n # tb_log['training/lr'] = self.lr_schedulers().get_last_lr()[0]\n self.logger.log_metrics(tb_log, step=self.global_step)\n #\n # return total_loss\n\n # def on_before_optimizer_step(self, *args, **kwargs):\n # self.log_dict(grad_norm(self, norm_type=2))\n\n def _on_validation_start(self):\n pass\n\n def on_validation_start(self):\n self._on_validation_start()\n for metric in self.valid_losses.values():\n metric.to(self.device)\n metric.reset()\n\n def _validation_step(self, sample, batch_idx):\n \"\"\"\n\n :param sample:\n :param batch_idx:\n :return: loss_log: dict, weight: int\n \"\"\"\n raise NotImplementedError()\n\n def validation_step(self, sample, batch_idx):\n \"\"\"\n\n :param sample:\n :param batch_idx:\n\n \"\"\"\n\n # if self.skip_immediate_validations == 0 and self.global_step != 0:\n # self.skip_immediate_validation = True\n # self.skip_immediate_validations = 1\n # if self.global_step == 0:\n # self.skip_immediate_validations = 1\n\n if self.skip_immediate_validation:\n rank_zero_debug(f\"Skip validation {batch_idx}\")\n return {}\n with torch.autocast(self.device.type, enabled=False):\n losses, weight = 
self._validation_step(sample, batch_idx)\n losses = {\n 'total_loss': sum(losses.values()),\n **losses\n }\n for k, v in losses.items():\n if k not in self.valid_losses:\n self.valid_losses[k] = MeanMetric().to(self.device)\n self.valid_losses[k].update(v, weight=weight) # weight=1\n return losses\n\n def on_validation_epoch_end(self):\n if self.skip_immediate_validation:\n self.skip_immediate_validation = False\n self.skip_immediate_ckpt_save = True\n return\n loss_vals = {k: v.compute() for k, v in self.valid_losses.items()}\n self.log('val_loss', loss_vals['total_loss'], on_epoch=True, prog_bar=True, logger=False, sync_dist=True)\n self.logger.log_metrics({f'validation/{k}': v for k, v in loss_vals.items()}, step=self.global_step)\n for metric in self.valid_losses.values():\n metric.reset()\n metric_vals = {k: getattr(self, k).compute() for k in self.valid_metric_names}\n self.logger.log_metrics({f'metrics/{k}': v for k, v in metric_vals.items()}, step=self.global_step)\n for metric_name in self.valid_metric_names:\n getattr(self, metric_name).reset()\n\n # noinspection PyMethodMayBeStatic\n def build_scheduler(self, optimizer):\n from utils import build_lr_scheduler_from_config\n\n scheduler_args = self.config['lr_scheduler_args']\n assert scheduler_args['scheduler_cls'] != ''\n scheduler = build_lr_scheduler_from_config(optimizer, scheduler_args)\n return scheduler\n\n # noinspection PyMethodMayBeStatic\n def build_optimizer(self, model, optimizer_args):\n from utils import build_object_from_class_name\n\n assert optimizer_args['optimizer_cls'] != ''\n if 'beta1' in optimizer_args and 'beta2' in optimizer_args and 'betas' not in optimizer_args:\n optimizer_args['betas'] = (optimizer_args['beta1'], optimizer_args['beta2'])\n\n if isinstance(model, nn.ModuleList):\n parameterslist = []\n for i in model:\n parameterslist = parameterslist + list(i.parameters())\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n parameterslist,\n **optimizer_args\n )\n elif isinstance(model, nn.ModuleDict):\n parameterslist = []\n for i in model:\n # parameterslist = parameterslist + list(model[i].parameters())\n parameterslist.append({'params': model[i].parameters()})\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n parameterslist,\n **optimizer_args\n )\n elif isinstance(model, nn.Module):\n\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n model.parameters(),\n **optimizer_args\n )\n else:\n raise RuntimeError(\"\")\n\n return optimizer\n\n def configure_optimizers(self):\n optG = self.build_optimizer(self.generator, optimizer_args=self.config['generater_optimizer_args'])\n optD = self.build_optimizer(self.discriminator, optimizer_args=self.config['discriminate_optimizer_args'])\n\n return [optG, optD]\n # scheduler = self.build_scheduler(optm)\n # if scheduler is None:\n # return optm\n # return {\n # \"optimizer\": optm,\n # \"lr_scheduler\": {\n # \"scheduler\": scheduler,\n # \"interval\": \"step\",\n # \"frequency\": 1\n # }\n # }\n\n def train_dataloader(self):\n # self.training_sampler = DsBatchSampler(\n # self.train_dataset,\n # max_batch_frames=self.max_batch_frames,\n # max_batch_size=self.max_batch_size,\n # num_replicas=(self.trainer.distributed_sampler_kwargs or {}).get('num_replicas', 1),\n # rank=(self.trainer.distributed_sampler_kwargs or {}).get('rank', 0),\n # sort_by_similar_size=self.config['sort_by_len'],\n # 
required_batch_count_multiple=self.config['accumulate_grad_batches'],\n # frame_count_grid=self.config['sampler_frame_count_grid'],\n # shuffle_sample=True,\n # shuffle_batch=False,\n # seed=self.config['seed']\n # )\n return torch.utils.data.DataLoader(self.train_dataset,\n collate_fn=self.train_dataset.collater,\n batch_size=self.config['batch_size'],\n # batch_sampler=self.training_sampler,\n num_workers=self.config['ds_workers'],\n prefetch_factor=self.config['dataloader_prefetch_factor'],\n pin_memory=True,\n persistent_workers=True)\n\n def val_dataloader(self):\n # sampler = DsEvalBatchSampler(\n # self.valid_dataset,\n # max_batch_frames=self.max_val_batch_frames,\n # max_batch_size=self.max_val_batch_size,\n # rank=(self.trainer.distributed_sampler_kwargs or {}).get('rank', 0),\n # batch_by_size=False\n # )\n return torch.utils.data.DataLoader(self.valid_dataset,\n collate_fn=self.valid_dataset.collater,\n batch_size=1,\n # batch_sampler=sampler,\n num_workers=self.config['ds_workers'],\n prefetch_factor=self.config['dataloader_prefetch_factor'],\n shuffle=False)\n\n def test_dataloader(self):\n return self.val_dataloader()\n\n def on_test_start(self):\n self.on_validation_start()\n\n def test_step(self, sample, batch_idx):\n return self.validation_step(sample, batch_idx)\n\n def on_test_end(self):\n return self.on_validation_end()\n\n def on_save_checkpoint(self, checkpoint):\n pass\n # checkpoint['trainer_stage'] = self.trainer.state.stage.value\n\n # def on_load_checkpoint(self, checkpoint):\n # # from lightning.pytorch.trainer.states import RunningStage\n # from utils import simulate_lr_scheduler\n # # if checkpoint.get('trainer_stage', '') == RunningStage.VALIDATING.value:\n # # self.skip_immediate_validation = True\n #\n # optimizer_args = self.config['optimizer_args']\n # scheduler_args = self.config['lr_scheduler_args']\n #\n # if 'beta1' in optimizer_args and 'beta2' in optimizer_args and 'betas' not in optimizer_args:\n # optimizer_args['betas'] = (optimizer_args['beta1'], optimizer_args['beta2'])\n #\n # if checkpoint.get('optimizer_states', None):\n # opt_states = checkpoint['optimizer_states']\n # assert len(opt_states) == 1 # only support one optimizer\n # opt_state = opt_states[0]\n # for param_group in opt_state['param_groups']:\n # for k, v in optimizer_args.items():\n # if k in param_group and param_group[k] != v:\n # if 'lr_schedulers' in checkpoint and checkpoint['lr_schedulers'] and k == 'lr':\n # continue\n # rank_zero_info(f'| Overriding optimizer parameter {k} from checkpoint: {param_group[k]} -> {v}')\n # param_group[k] = v\n # if 'initial_lr' in param_group and param_group['initial_lr'] != optimizer_args['lr']:\n # rank_zero_info(\n # f'| Overriding optimizer parameter initial_lr from checkpoint: {param_group[\"initial_lr\"]} -> {optimizer_args[\"lr\"]}'\n # )\n # param_group['initial_lr'] = optimizer_args['lr']\n #\n # if checkpoint.get('lr_schedulers', None):\n # assert checkpoint.get('optimizer_states', False)\n # assert len(checkpoint['lr_schedulers']) == 1 # only support one scheduler\n # checkpoint['lr_schedulers'][0] = simulate_lr_scheduler(\n # optimizer_args, scheduler_args,\n # step_count=checkpoint['global_step'],\n # num_param_groups=len(checkpoint['optimizer_states'][0]['param_groups'])\n # )\n # for param_group, new_lr in zip(\n # checkpoint['optimizer_states'][0]['param_groups'],\n # checkpoint['lr_schedulers'][0]['_last_lr'],\n # ):\n # if param_group['lr'] != new_lr:\n # rank_zero_info(\n # f'| Overriding optimizer parameter lr from 
checkpoint: {param_group[\"lr\"]} -> {new_lr}')\n # param_group['lr'] = new_lr" }, { "identifier": "DsBatchSampler", "path": "utils/training_utils.py", "snippet": "class DsBatchSampler(Sampler):\n def __init__(self, dataset, max_batch_frames, max_batch_size, sub_indices=None,\n num_replicas=None, rank=None, frame_count_grid=200,\n required_batch_count_multiple=1, batch_by_size=True, sort_by_similar_size=True,\n shuffle_sample=False, shuffle_batch=False, seed=0, drop_last=False) -> None:\n self.dataset = dataset\n self.max_batch_frames = max_batch_frames\n self.max_batch_size = max_batch_size\n self.sub_indices = sub_indices\n self.num_replicas = num_replicas\n self.rank = rank\n self.frame_count_grid = frame_count_grid\n self.required_batch_count_multiple = required_batch_count_multiple\n self.batch_by_size = batch_by_size\n self.sort_by_similar_size = sort_by_similar_size\n self.shuffle_sample = shuffle_sample\n self.shuffle_batch = shuffle_batch\n self.seed = seed\n self.drop_last = drop_last\n self.epoch = 0\n self.batches = None\n self.formed = None\n\n def __form_batches(self):\n if self.formed == self.epoch + self.seed:\n return\n rng = np.random.default_rng(self.seed + self.epoch)\n if self.shuffle_sample:\n if self.sub_indices is not None:\n rng.shuffle(self.sub_indices)\n indices = np.array(self.sub_indices)\n else:\n indices = rng.permutation(len(self.dataset))\n\n if self.sort_by_similar_size:\n grid = self.frame_count_grid\n assert grid > 0\n sizes = (np.round(np.array(self.dataset._sizes)[indices] / grid) * grid).clip(grid, None).astype(\n np.int64)\n indices = indices[np.argsort(sizes, kind='mergesort')]\n\n indices = indices.tolist()\n else:\n indices = self.sub_indices if self.sub_indices is not None else list(range(len(self.dataset)))\n\n if self.batch_by_size:\n batches = utils.batch_by_size(\n indices, self.dataset.num_frames,\n max_batch_frames=self.max_batch_frames,\n max_batch_size=self.max_batch_size\n )\n else:\n batches = [indices[i:i + self.max_batch_size] for i in range(0, len(indices), self.max_batch_size)]\n\n floored_total_batch_count = (len(batches) // self.num_replicas) * self.num_replicas\n if self.drop_last and len(batches) > floored_total_batch_count:\n batches = batches[:floored_total_batch_count]\n leftovers = []\n else:\n leftovers = (rng.permutation(len(batches) - floored_total_batch_count) + floored_total_batch_count).tolist()\n\n batch_assignment = rng.permuted(\n np.arange(floored_total_batch_count).reshape(-1, self.num_replicas).transpose(), axis=0\n )[self.rank].tolist()\n floored_batch_count = len(batch_assignment)\n ceiled_batch_count = floored_batch_count + (1 if len(leftovers) > 0 else 0)\n if self.rank < len(leftovers):\n batch_assignment.append(leftovers[self.rank])\n elif len(leftovers) > 0:\n batch_assignment.append(batch_assignment[self.epoch % floored_batch_count])\n if self.required_batch_count_multiple > 1 and ceiled_batch_count % self.required_batch_count_multiple != 0:\n # batch_assignment = batch_assignment[:((floored_batch_count \\\n # // self.required_batch_count_multiple) * self.required_batch_count_multiple)]\n ceiled_batch_count = math.ceil(\n ceiled_batch_count / self.required_batch_count_multiple) * self.required_batch_count_multiple\n for i in range(ceiled_batch_count - len(batch_assignment)):\n batch_assignment.append(\n batch_assignment[(i + self.epoch * self.required_batch_count_multiple) % floored_batch_count])\n\n self.batches = [deepcopy(batches[i]) for i in batch_assignment]\n\n if self.shuffle_batch:\n 
rng.shuffle(self.batches)\n\n del indices\n del batches\n del batch_assignment\n\n def __iter__(self):\n self.__form_batches()\n return iter(self.batches)\n\n def __len__(self):\n self.__form_batches()\n if self.batches is None:\n raise RuntimeError(\"Batches are not initialized. Call __form_batches first.\")\n return len(self.batches)\n\n def set_epoch(self, epoch):\n self.epoch = epoch" }, { "identifier": "DsEvalBatchSampler", "path": "utils/training_utils.py", "snippet": "class DsEvalBatchSampler(Sampler):\n def __init__(self, dataset, max_batch_frames, max_batch_size, rank=None, batch_by_size=True) -> None:\n self.dataset = dataset\n self.max_batch_frames = max_batch_frames\n self.max_batch_size = max_batch_size\n self.rank = rank\n self.batch_by_size = batch_by_size\n self.batches = None\n self.batch_size = max_batch_size\n self.drop_last = False\n\n if self.rank == 0:\n indices = list(range(len(self.dataset)))\n if self.batch_by_size:\n self.batches = utils.batch_by_size(\n indices, self.dataset.num_frames,\n max_batch_frames=self.max_batch_frames, max_batch_size=self.max_batch_size\n )\n else:\n self.batches = [\n indices[i:i + self.max_batch_size]\n for i in range(0, len(indices), self.max_batch_size)\n ]\n else:\n self.batches = [[0]]\n\n def __iter__(self):\n return iter(self.batches)\n\n def __len__(self):\n return len(self.batches)" }, { "identifier": "get_latest_checkpoint_path", "path": "utils/training_utils.py", "snippet": "def get_latest_checkpoint_path(work_dir):\n if not isinstance(work_dir, Path):\n work_dir = Path(work_dir)\n if not work_dir.exists():\n return None\n\n last_step = -1\n last_ckpt_name = None\n\n for ckpt in work_dir.glob('model_ckpt_steps_*.ckpt'):\n search = re.search(r'steps_\\d+', ckpt.name)\n if search:\n step = int(search.group(0)[6:])\n if step > last_step:\n last_step = step\n last_ckpt_name = str(ckpt)\n\n return last_ckpt_name if last_ckpt_name is not None else None" }, { "identifier": "PitchAdjustableMelSpectrogram", "path": "utils/wav2mel.py", "snippet": "class PitchAdjustableMelSpectrogram:\n def __init__(\n self,\n sample_rate=44100,\n n_fft=2048,\n win_length=2048,\n hop_length=512,\n f_min=40,\n f_max=16000,\n n_mels=128,\n center=False,\n ):\n self.sample_rate = sample_rate\n self.n_fft = n_fft\n self.win_size = win_length\n self.hop_length = hop_length\n self.f_min = f_min\n self.f_max = f_max\n self.n_mels = n_mels\n self.center = center\n\n self.mel_basis = {}\n self.hann_window = {}\n\n def __call__(self, y, key_shift=0, speed=1.0):\n factor = 2 ** (key_shift / 12)\n n_fft_new = int(np.round(self.n_fft * factor))\n win_size_new = int(np.round(self.win_size * factor))\n hop_length = int(np.round(self.hop_length * speed))\n\n # if torch.min(y) < -1.0:\n # logger.warning(f\"min value is {torch.min(y)}\")\n # if torch.max(y) > 1.0:\n # logger.warning(f\"max value is {torch.max(y)}\")\n\n mel_basis_key = f\"{self.f_max}_{y.device}\"\n if mel_basis_key not in self.mel_basis:\n mel = librosa_mel_fn(\n sr=self.sample_rate,\n n_fft=self.n_fft,\n n_mels=self.n_mels,\n fmin=self.f_min,\n fmax=self.f_max,\n )\n self.mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)\n\n hann_window_key = f\"{key_shift}_{y.device}\"\n if hann_window_key not in self.hann_window:\n self.hann_window[hann_window_key] = torch.hann_window(\n win_size_new, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (\n int((win_size_new - hop_length) // 2),\n int((win_size_new - hop_length+1) // 2),\n ),\n mode=\"reflect\",\n )\n y = 
y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft_new,\n hop_length=hop_length,\n win_length=win_size_new,\n window=self.hann_window[hann_window_key],\n center=self.center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=True,\n ).abs()\n # spec = torch.view_as_real(spec)\n # spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))\n\n if key_shift != 0:\n size = self.n_fft // 2 + 1\n resize = spec.size(1)\n if resize < size:\n spec = F.pad(spec, (0, 0, 0, size - resize))\n\n spec = spec[:, :size, :] * self.win_size / win_size_new\n\n spec = torch.matmul(self.mel_basis[mel_basis_key], spec)\n\n return spec\n\n def dynamic_range_compression_torch(self,x, C=1, clip_val=1e-5):\n return torch.log(torch.clamp(x, min=clip_val) * C)" } ]
import logging
import os
import pathlib
import random
import sys
import lightning.pytorch as pl
import matplotlib
import numpy as np
import torch.utils.data
import utils
from typing import Dict
from lightning.pytorch.utilities.rank_zero import rank_zero_debug, rank_zero_info, rank_zero_only
from matplotlib import pyplot as plt
from torch import nn
from torch.utils.data import Dataset
from torchmetrics import Metric, MeanMetric
from models.nsf_HiFigan_chroma.models import Generator, AttrDict, MultiScaleDiscriminator, MultiPeriodDiscriminator
from modules.loss.HiFiloss import HiFiloss
from training.base_task_gan import GanBaseTask
from utils.training_utils import (
    DsBatchSampler,
    DsEvalBatchSampler,
    get_latest_checkpoint_path
)
from utils.wav2mel import PitchAdjustableMelSpectrogram
11452
start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames) end = start + crop_mel_frames if self.infer: record['spectrogram'] = record['spectrogram'].T record['f0'] = record['f0'] else: record['spectrogram'] = record['spectrogram'][start:end].T record['f0'] = record['f0'][start:end] start *= samples_per_frame end *= samples_per_frame if self.infer: cty=(len(record['spectrogram'].T) * samples_per_frame) record['audio'] = record['audio'][:cty] record['audio'] = np.pad(record['audio'], ( 0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])), mode='constant') pass else: # record['spectrogram'] = record['spectrogram'][start:end].T record['audio'] = record['audio'][start:end] record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant') if self.volume_aug: for record in minibatch: if random.random() < self.volume_aug_prob: audio = record['audio'] audio_mel = record['spectrogram'] max_amp = float(np.max(np.abs(audio))) + 1e-5 max_shift = min(3, np.log(1 / max_amp)) log_mel_shift = random.uniform(-3, max_shift) # audio *= (10 ** log_mel_shift) audio *= np.exp(log_mel_shift) audio_mel += log_mel_shift audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy() record['audio'] = audio record['spectrogram'] = audio_mel audio = np.stack([record['audio'] for record in minibatch if 'audio' in record]) spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record]) f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record]) return { 'audio': torch.from_numpy(audio).unsqueeze(1), 'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0), } class stftlog: def __init__(self, n_fft=2048, win_length=2048, hop_length=512, center=False,): self.hop_length=hop_length self.win_size=win_length self.n_fft = n_fft self.win_size = win_length self.center = center self.hann_window = {} def exc(self,y): hann_window_key = f"{y.device}" if hann_window_key not in self.hann_window: self.hann_window[hann_window_key] = torch.hann_window( self.win_size, device=y.device ) y = torch.nn.functional.pad( y.unsqueeze(1), ( int((self.win_size - self.hop_length) // 2), int((self.win_size - self.hop_length+1) // 2), ), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, self.n_fft, hop_length=self.hop_length, win_length=self.win_size, window=self.hann_window[hann_window_key], center=self.center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True, ).abs() return spec class nsf_HiFigan_chroma(GanBaseTask): def __init__(self, config): super().__init__(config) self.TF = PitchAdjustableMelSpectrogram( f_min=0, f_max=None, n_mels=256,) self.logged_gt_wav = set() self.stft=stftlog() def build_dataset(self): self.train_dataset = nsf_HiFigan_dataset(config=self.config, data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[ 'train_set_name']) self.valid_dataset = nsf_HiFigan_dataset(config=self.config, data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[ 'valid_set_name'], infer=True) def build_model(self): cfg=self.config['model_args'] cfg.update({'sampling_rate':self.config['audio_sample_rate'],'num_mels':self.config['audio_num_mel_bins'],'hop_size':self.config['hop_size']}) h=AttrDict(cfg) self.generator=Generator(h)
# from utils.indexed_datasets import IndexedDataset def spec_to_figure(spec, vmin=None, vmax=None): if isinstance(spec, torch.Tensor): spec = spec.cpu().numpy() fig = plt.figure(figsize=(12, 9),dpi=100) plt.pcolor(spec.T, vmin=vmin, vmax=vmax) plt.tight_layout() return fig class nsf_HiFigan_dataset(Dataset): def __init__(self, config: dict, data_dir, infer=False): super().__init__() self.config = config self.data_dir = data_dir if isinstance(data_dir, pathlib.Path) else pathlib.Path(data_dir) with open(self.data_dir, 'r', encoding='utf8') as f: fills = f.read().strip().split('\n') self.data_index = fills self.infer = infer self.volume_aug = self.config['volume_aug'] self.volume_aug_prob = self.config['volume_aug_prob'] if not infer else 0 def __getitem__(self, index): data_path = self.data_index[index] data = np.load(data_path) return {'f0':data['f0'],'spectrogram':data['mel'],'audio':data['audio']} def __len__(self): return len(self.data_index) def collater(self, minibatch): samples_per_frame = self.config['hop_size'] if self.infer: crop_mel_frames = 0 else: crop_mel_frames = self.config['crop_mel_frames'] for record in minibatch: # Filter out records that aren't long enough. if len(record['spectrogram']) < crop_mel_frames: del record['spectrogram'] del record['audio'] del record['f0'] continue start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames) end = start + crop_mel_frames if self.infer: record['spectrogram'] = record['spectrogram'].T record['f0'] = record['f0'] else: record['spectrogram'] = record['spectrogram'][start:end].T record['f0'] = record['f0'][start:end] start *= samples_per_frame end *= samples_per_frame if self.infer: cty=(len(record['spectrogram'].T) * samples_per_frame) record['audio'] = record['audio'][:cty] record['audio'] = np.pad(record['audio'], ( 0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])), mode='constant') pass else: # record['spectrogram'] = record['spectrogram'][start:end].T record['audio'] = record['audio'][start:end] record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant') if self.volume_aug: for record in minibatch: if random.random() < self.volume_aug_prob: audio = record['audio'] audio_mel = record['spectrogram'] max_amp = float(np.max(np.abs(audio))) + 1e-5 max_shift = min(3, np.log(1 / max_amp)) log_mel_shift = random.uniform(-3, max_shift) # audio *= (10 ** log_mel_shift) audio *= np.exp(log_mel_shift) audio_mel += log_mel_shift audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy() record['audio'] = audio record['spectrogram'] = audio_mel audio = np.stack([record['audio'] for record in minibatch if 'audio' in record]) spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record]) f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record]) return { 'audio': torch.from_numpy(audio).unsqueeze(1), 'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0), } class stftlog: def __init__(self, n_fft=2048, win_length=2048, hop_length=512, center=False,): self.hop_length=hop_length self.win_size=win_length self.n_fft = n_fft self.win_size = win_length self.center = center self.hann_window = {} def exc(self,y): hann_window_key = f"{y.device}" if hann_window_key not in self.hann_window: self.hann_window[hann_window_key] = torch.hann_window( self.win_size, device=y.device ) y = torch.nn.functional.pad( y.unsqueeze(1), ( int((self.win_size - self.hop_length) // 2), int((self.win_size - 
self.hop_length+1) // 2), ), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, self.n_fft, hop_length=self.hop_length, win_length=self.win_size, window=self.hann_window[hann_window_key], center=self.center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True, ).abs() return spec class nsf_HiFigan_chroma(GanBaseTask): def __init__(self, config): super().__init__(config) self.TF = PitchAdjustableMelSpectrogram( f_min=0, f_max=None, n_mels=256,) self.logged_gt_wav = set() self.stft=stftlog() def build_dataset(self): self.train_dataset = nsf_HiFigan_dataset(config=self.config, data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[ 'train_set_name']) self.valid_dataset = nsf_HiFigan_dataset(config=self.config, data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[ 'valid_set_name'], infer=True) def build_model(self): cfg=self.config['model_args'] cfg.update({'sampling_rate':self.config['audio_sample_rate'],'num_mels':self.config['audio_num_mel_bins'],'hop_size':self.config['hop_size']}) h=AttrDict(cfg) self.generator=Generator(h)
self.discriminator=nn.ModuleDict({'msd':MultiScaleDiscriminator(), 'mpd':MultiPeriodDiscriminator(periods=cfg['discriminator_periods'])})
2
2023-10-17 13:45:09+00:00
16k
Jacob-Zhou/gecdi
gec/parser.py
[ { "identifier": "Dataset", "path": "gec/data.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n r\"\"\"\n Dataset that is compatible with :class:`torch.utils.data.Dataset`, serving as a wrapper for manipulating all data fields\n with the operating behaviours defined in :class:`~supar.utils.transform.Transform`.\n The data fields of all the instantiated sentences can be accessed as an attribute of the dataset.\n\n Args:\n transform (Transform):\n An instance of :class:`~supar.utils.transform.Transform` or its derivations.\n The instance holds a series of loading and processing behaviours with regard to the specific data format.\n data (Union[str, Iterable]):\n A filename or a list of instances that will be passed into :meth:`transform.load`.\n cache (bool):\n If ``True``, tries to use the previously cached binarized data for fast loading.\n In this way, sentences are loaded on-the-fly according to the meta data.\n If ``False``, all sentences will be directly loaded into the memory.\n Default: ``False``.\n binarize (bool):\n If ``True``, binarizes the dataset once building it. Only works if ``cache=True``. Default: ``False``.\n bin (str):\n Path for saving binarized files, required if ``cache=True``. Default: ``None``.\n max_len (int):\n Sentences exceeding the length will be discarded. Default: ``None``.\n kwargs (Dict):\n Together with `data`, kwargs will be passed into :meth:`transform.load` to control the loading behaviour.\n\n Attributes:\n transform (Transform):\n An instance of :class:`~supar.utils.transform.Transform`.\n sentences (List[Sentence]):\n A list of sentences loaded from the data.\n Each sentence includes fields obeying the data format defined in ``transform``.\n If ``cache=True``, each is a pointer to the sentence stored in the cache file.\n \"\"\"\n\n def __init__(\n self,\n transform: Transform,\n data: Union[str, Iterable],\n cache: bool = False,\n binarize: bool = False,\n bin: str = None,\n max_len: int = None,\n **kwargs\n ) -> Dataset:\n super(Dataset, self).__init__()\n\n self.transform = transform\n self.data = data\n self.cache = cache\n self.binarize = binarize\n self.bin = bin\n self.max_len = max_len or INF\n self.kwargs = kwargs\n\n if cache:\n if not isinstance(data, str) or not os.path.exists(data):\n raise FileNotFoundError(\"Only files are allowed for binarization, but not found\")\n if self.bin is None:\n self.fbin = data + '.pt'\n else:\n os.makedirs(self.bin, exist_ok=True)\n self.fbin = os.path.join(self.bin, os.path.split(data)[1]) + '.pt'\n if not self.binarize and os.path.exists(self.fbin):\n try:\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n except Exception:\n raise RuntimeError(f\"Error found while debinarizing {self.fbin}, which may have been corrupted. 
\"\n \"Try re-binarizing it first\")\n else:\n self.sentences = list(transform.load(data, **kwargs))\n\n def __repr__(self):\n s = f\"{self.__class__.__name__}(\"\n s += f\"n_sentences={len(self.sentences)}\"\n if hasattr(self, 'loader'):\n s += f\", n_batches={len(self.loader)}\"\n if hasattr(self, 'buckets'):\n s += f\", n_buckets={len(self.buckets)}\"\n if self.shuffle:\n s += f\", seed={self.seed}\"\n if self.cache:\n s += f\", cache={self.cache}\"\n if self.binarize:\n s += f\", binarize={self.binarize}\"\n if self.max_len < INF:\n s += f\", max_len={self.max_len}\"\n s += \")\"\n return s\n\n def __len__(self):\n return len(self.sentences)\n\n def __getitem__(self, index):\n return debinarize(self.fbin, self.sentences[index]) if self.cache else self.sentences[index]\n\n def __getattr__(self, name):\n if name not in {f.name for f in self.transform.flattened_fields}:\n raise AttributeError\n if self.cache:\n if os.path.exists(self.fbin) and not self.binarize:\n sentences = self\n else:\n sentences = self.transform.load(self.data, **self.kwargs)\n return (getattr(sentence, name) for sentence in sentences)\n return [getattr(sentence, name) for sentence in self.sentences]\n\n def __getstate__(self):\n return self.__dict__\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\n @lazy_property\n def sizes(self):\n if not self.cache:\n return [s.size for s in self.sentences]\n return debinarize(self.fbin, 'sizes')\n\n def build(\n self,\n batch_size: int,\n n_buckets: int = 1,\n shuffle: bool = False,\n distributed: bool = False,\n n_workers: int = 0,\n pin_memory: bool = True,\n chunk_size: int = 1000,\n seed: int = 1,\n ) -> Dataset:\n # numericalize all fields\n if not self.cache:\n self.sentences = [i for i in self.transform(self.sentences) if len(i) < self.max_len]\n else:\n # if not forced to do binarization and the binarized file already exists, directly load the meta file\n if os.path.exists(self.fbin) and not self.binarize:\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n else:\n @contextmanager\n def cache(sentences):\n ftemp = tempfile.mkdtemp()\n fs = os.path.join(ftemp, 'sentences')\n fb = os.path.join(ftemp, os.path.basename(self.fbin))\n global global_transform\n global_transform = self.transform\n sentences = binarize({'sentences': progress_bar(sentences)}, fs)[1]['sentences']\n try:\n yield ((sentences[s:s+chunk_size], fs, f\"{fb}.{i}\", self.max_len)\n for i, s in enumerate(range(0, len(sentences), chunk_size)))\n finally:\n del global_transform\n shutil.rmtree(ftemp)\n\n def numericalize(sentences, fs, fb, max_len):\n sentences = global_transform((debinarize(fs, sentence) for sentence in sentences))\n sentences = [i for i in sentences if len(i) < max_len]\n return binarize({'sentences': sentences, 'sizes': [sentence.size for sentence in sentences]}, fb)[0]\n\n logger.info(f\"Seeking to cache the data to {self.fbin} first\")\n # numericalize the fields of each sentence\n if is_master():\n with cache(self.transform.load(self.data, **self.kwargs)) as chunks, mp.Pool(32) as pool:\n results = [pool.apply_async(numericalize, chunk) for chunk in chunks]\n self.sentences = binarize((r.get() for r in results), self.fbin, merge=True)[1]['sentences']\n if is_dist():\n dist.barrier()\n if not is_master():\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n # NOTE: the final bucket count is roughly equal to n_buckets\n self.buckets = dict(zip(*kmeans(self.sizes, n_buckets)))\n self.loader = DataLoader(transform=self.transform,\n 
dataset=self,\n batch_sampler=Sampler(self.buckets, batch_size, shuffle, distributed, seed=seed),\n num_workers=n_workers,\n collate_fn=collate_fn,\n pin_memory=pin_memory)\n self.seed = seed\n self.shuffle = shuffle\n return self" }, { "identifier": "map_token_ids", "path": "gec/fn.py", "snippet": "def map_token_ids(vocab_0, vocab_1, equal_labels=None):\n \"\"\"\n Map token ids from vocab_0 to vocab_1\n\n Args:\n vocab_0 (dict): vocab_0\n vocab_1 (dict): vocab_1\n equal_labels (dict): equal_labels\n \"\"\"\n if equal_labels is None:\n equal_labels = {}\n return [(i, vocab_1[equal_labels.get(k, k)]) for k, i in vocab_0.items()\n if k in vocab_1]" }, { "identifier": "PerplexityMetric", "path": "gec/metric.py", "snippet": "class PerplexityMetric(Metric):\n def __init__(self,\n loss: Optional[float] = None,\n preds: Optional[torch.Tensor] = None,\n golds: Optional[torch.Tensor] = None,\n mask: Optional[torch.BoolTensor] = None,\n reverse: bool = True,\n eps: float = 1e-12) -> PerplexityMetric:\n super().__init__(reverse=reverse, eps=eps)\n\n self.n_tokens = 0.\n\n self.tp = 0.0\n self.pred = 0.0\n self.gold = 0.0\n\n self.total_loss = 0.\n\n if loss is not None:\n self(loss, preds, golds, mask)\n\n def __repr__(self):\n s = f\"loss: {self.loss:.4f} PPL: {self.ppl:.4f}\"\n if self.tp > 0:\n s += f\" - TGT: P: {self.p:6.2%} R: {self.r:6.2%} F0.5: {self.f:6.2%}\"\n return s\n\n def __call__(self, loss: float, preds: Tuple[List, torch.Tensor],\n golds: Tuple[List, torch.Tensor],\n mask: torch.BoolTensor) -> PerplexityMetric:\n n_tokens = mask.sum().item()\n self.n += len(mask)\n self.count += 1\n self.n_tokens += n_tokens\n self.total_loss += float(loss) * n_tokens\n\n if preds is not None:\n with tempfile.TemporaryDirectory() as t:\n fsrc, fpred, fgold = os.path.join(t, 'src'), os.path.join(\n t, 'pred'), os.path.join(t, 'gold')\n pred_m2, gold_m2 = os.path.join(t, 'pred.m2'), os.path.join(\n t, 'gold.m2')\n with open(fsrc, 'w') as fs, open(fpred, 'w') as f:\n for s, i in preds:\n fs.write(s + '\\n')\n f.write(i + '\\n')\n with open(fgold, 'w') as f:\n for _, i in golds:\n f.write(i + '\\n')\n subprocess.check_output([\n 'errant_parallel', '-orig', f'{fsrc}', '-cor', f'{fpred}',\n '-out', f'{pred_m2}'\n ])\n subprocess.check_output([\n 'errant_parallel', '-orig', f'{fsrc}', '-cor', f'{fgold}',\n '-out', f'{gold_m2}'\n ])\n out = subprocess.check_output(\n [\n 'errant_compare', '-hyp', f'{pred_m2}', '-ref',\n f'{gold_m2}'\n ],\n stderr=subprocess.STDOUT).decode()\n tp, fp, fn = (int(i) for i in out.split('\\n')[3].split()[:3])\n self.tp += tp\n self.pred += tp + fp\n self.gold += tp + fn\n return self\n\n def __add__(self, other: PerplexityMetric) -> PerplexityMetric:\n metric = PerplexityMetric(eps=self.eps)\n metric.n = self.n + other.n\n metric.count = self.count + other.count\n metric.n_tokens = self.n_tokens + other.n_tokens\n metric.total_loss = self.total_loss + other.total_loss\n\n metric.tp = self.tp + other.tp\n metric.pred = self.pred + other.pred\n metric.gold = self.gold + other.gold\n metric.reverse = self.reverse or other.reverse\n return metric\n\n @property\n def score(self):\n return self.f if self.f > 0 else self.ppl\n\n @property\n def loss(self):\n return self.total_loss / self.n_tokens\n\n @property\n def ppl(self):\n return math.pow(2, (self.loss / math.log(2)))\n\n @property\n def p(self):\n return self.tp / (self.pred + self.eps)\n\n @property\n def r(self):\n return self.tp / (self.gold + self.eps)\n\n @property\n def f(self):\n return (1 + 0.5**2) * self.p * self.r / 
(0.5**2 * self.p + self.r +\n self.eps)" }, { "identifier": "SpanMetric", "path": "gec/metric.py", "snippet": "class SpanMetric(Metric):\n def __init__(self,\n loss: Optional[float] = None,\n preds: Optional[List[List[Tuple]]] = None,\n golds: Optional[List[List[Tuple]]] = None,\n reverse: bool = False,\n beta: Optional[float] = 1.,\n eps: float = 1e-12) -> SpanMetric:\n super().__init__(reverse=reverse, eps=eps)\n\n self.n_ucm = 0.0\n self.n_lcm = 0.0\n self.n_tr = 0.0\n self.n_fr = 0.0\n self.n_e = 0.0\n self.n_c = 0.0\n self.utp = 0.0\n self.ltp = 0.0\n self.pred = 0.0\n self.gold = 0.0\n self.beta = beta\n\n if loss is not None:\n self(loss, preds, golds)\n\n def __repr__(self):\n s = f\"ErrorSents: {self.n_e:6.0f} CorrectSents: {self.n_c:6.0f} TR: {self.tr:7.2%} FR: {self.fr:7.2%} \"\n # s += f\"GoldSpans: {self.gold:6.0f} PredSpans: {self.pred:6.0f} \"\n s += f\"UP: {self.up:7.2%} UR: {self.ur:7.2%} UF{'' if self.beta == 1.0 else self.beta}: {self.uf:7.2%} \"\n s += f\"LP: {self.lp:7.2%} LR: {self.lr:7.2%} LF{'' if self.beta == 1.0 else self.beta}: {self.lf:7.2%}\"\n return s\n\n def __call__(self, loss: float, preds: List[List[Tuple]],\n golds: List[List[Tuple]]) -> SpanMetric:\n self.n += len(preds)\n self.count += 1\n self.total_loss += float(loss)\n for pred, gold in zip(preds, golds):\n upred, ugold = Counter([tuple(span[:-1])\n for span in pred]), Counter(\n [tuple(span[:-1]) for span in gold])\n lpred, lgold = Counter([tuple(span) for span in pred\n ]), Counter([tuple(span) for span in gold])\n utp, ltp = list((upred & ugold).elements()), list(\n (lpred & lgold).elements())\n self.n_ucm += len(utp) == len(pred) == len(gold)\n self.n_lcm += len(ltp) == len(pred) == len(gold)\n self.n_tr += ((len(gold) > 0) and (len(pred) > 0))\n self.n_fr += ((len(gold) == 0) and (len(pred) > 0))\n self.n_e += (len(gold) > 0)\n self.n_c += (len(gold) == 0)\n self.utp += len(utp)\n self.ltp += len(ltp)\n self.pred += len(pred)\n self.gold += len(gold)\n return self\n\n def __add__(self, other: SpanMetric) -> SpanMetric:\n metric = SpanMetric(eps=self.eps, beta=self.beta)\n metric.n = self.n + other.n\n metric.count = self.count + other.count\n metric.total_loss = self.total_loss + other.total_loss\n metric.n_ucm = self.n_ucm + other.n_ucm\n metric.n_lcm = self.n_lcm + other.n_lcm\n metric.n_tr = self.n_tr + other.n_tr\n metric.n_fr = self.n_fr + other.n_fr\n metric.n_e = self.n_e + other.n_e\n metric.n_c = self.n_c + other.n_c\n metric.utp = self.utp + other.utp\n metric.ltp = self.ltp + other.ltp\n metric.pred = self.pred + other.pred\n metric.gold = self.gold + other.gold\n metric.reverse = self.reverse or other.reverse\n return metric\n\n @property\n def score(self):\n return self.lf\n\n @property\n def ucm(self):\n return self.n_ucm / (self.n + self.eps)\n\n @property\n def lcm(self):\n return self.n_lcm / (self.n + self.eps)\n\n @property\n def tr(self):\n return self.n_tr / (self.n_e + self.eps)\n\n @property\n def fr(self):\n return self.n_fr / (self.n_c + self.eps)\n\n @property\n def up(self):\n return self.utp / (self.pred + self.eps)\n\n @property\n def ur(self):\n return self.utp / (self.gold + self.eps)\n\n @property\n def uf(self):\n return (1 + self.beta**2) * self.utp / (self.pred +\n (self.beta**2) * self.gold +\n self.eps)\n\n @property\n def lp(self):\n return self.ltp / (self.pred + self.eps)\n\n @property\n def lr(self):\n return self.ltp / (self.gold + self.eps)\n\n @property\n def lf(self):\n return (1 + self.beta**2) * self.ltp / (self.pred +\n (self.beta**2) * self.gold 
+\n self.eps)" }, { "identifier": "Seq2SeqDetectModel", "path": "gec/model.py", "snippet": "class Seq2SeqDetectModel(Seq2SeqModel):\n r\"\"\"\n The implementation of Semantic Role Labeling Parser using span-constrained CRF.\n\n Args:\n n_words (int):\n The size of the word vocabulary.\n n_tags (int):\n The number of POS tags, required if POS tag embeddings are used. Default: ``None``.\n n_chars (int):\n The number of characters, required if character-level representations are used. Default: ``None``.\n n_lemmas (int):\n The number of lemmas, required if lemma embeddings are used. Default: ``None``.\n encoder (str):\n Encoder to use.\n ``'lstm'``: BiLSTM encoder.\n ``'bert'``: BERT-like pretrained language model (for finetuning), e.g., ``'bert-base-cased'``.\n Default: ``'lstm'``.\n n_embed (int):\n The size of word embeddings. Default: 100.\n n_pretrained (int):\n The size of pretrained word embeddings. Default: 125.\n n_feat_embed (int):\n The size of feature representations. Default: 100.\n n_char_embed (int):\n The size of character embeddings serving as inputs of CharLSTM, required if using CharLSTM. Default: 50.\n n_char_hidden (int):\n The size of y states of CharLSTM, required if using CharLSTM. Default: 100.\n char_pad_index (int):\n The index of the padding token in the character vocabulary, required if using CharLSTM. Default: 0.\n elmo (str):\n Name of the pretrained ELMo registered in `ELMoEmbedding.OPTION`. Default: ``'original_5b'``.\n elmo_bos_eos (tuple[bool]):\n A tuple of two boolean values indicating whether to keep start/end boundaries of elmo outputs.\n Default: ``(True, False)``.\n bert (str):\n Specifies which kind of language model to use, e.g., ``'bert-base-cased'``.\n This is required if ``encoder='bert'`` or using BERT features. The full list can be found in `transformers`_.\n Default: ``None``.\n n_bert_layers (int):\n Specifies how many last layers to use, required if ``encoder='bert'`` or using BERT features.\n The final outputs would be weighted sum of the y states of these layers.\n Default: 4.\n mix_dropout (float):\n The dropout ratio of BERT layers, required if ``encoder='bert'`` or using BERT features. Default: .0.\n bert_pooling (str):\n Pooling way to get token embeddings.\n ``first``: take the first subtoken. ``last``: take the last subtoken. ``mean``: take a mean over all.\n Default: ``mean``.\n bert_pad_index (int):\n The index of the padding token in BERT vocabulary, required if ``encoder='bert'`` or using BERT features.\n Default: 0.\n freeze (bool):\n If ``True``, freezes BERT parameters, required if using BERT features. Default: ``True``.\n embed_dropout (float):\n The dropout ratio of input embeddings. Default: .2.\n n_encoder_hidden (int):\n The size of LSTM y states. Default: 600.\n n_encoder_layers (int):\n The number of LSTM layers. Default: 3.\n encoder_dropout (float):\n The dropout ratio of encoder layer. Default: .33.\n mlp_dropout (float):\n The dropout ratio of unary edge factor MLP layers. Default: .33.\n pad_index (int):\n The index of the padding token in the word vocabulary. Default: 0.\n unk_index (int):\n The index of the unknown token in the word vocabulary. Default: 1.\n\n .. 
_transformers:\n https://github.com/huggingface/transformers\n \"\"\"\n\n def __init__(self,\n n_words,\n n_labels,\n n_tags=None,\n n_chars=None,\n n_lemmas=None,\n encoder='lstm',\n n_embed=100,\n n_pretrained=100,\n n_feat_embed=100,\n n_char_embed=50,\n n_char_hidden=100,\n char_pad_index=0,\n char_dropout=0,\n elmo='original_5b',\n elmo_bos_eos=(True, False),\n bert=None,\n n_bert_layers=4,\n mix_dropout=.0,\n bert_pooling='mean',\n bert_pad_index=0,\n freeze=True,\n embed_dropout=.33,\n n_encoder_hidden=1024,\n n_encoder_layers=3,\n encoder_dropout=.1,\n pad_index=0,\n unk_index=1,\n **kwargs):\n super().__init__(**Config().update(locals()))\n\n del self.classifier\n self.error_classifier = nn.Linear(self.model.config.d_model,\n self.args.n_labels)\n self.criterion = CrossEntropyLoss(\n label_smoothing=self.args.label_smoothing)\n\n def loss(self, x, tgt, src_error, tgt_error, src_mask, tgt_mask):\n src_mask = src_mask & True\n tgt_mask = tgt_mask & True\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n\n n_shift = 1 if self.args.encoder == 'transformer' else 2\n y, tgt_mask = y[:, n_shift:], tgt_mask[:, n_shift:]\n\n y = self.decoder_dropout(y)\n # s_src_error = self.error_classifier(x[:, 1:-1])\n s_tgt_error = self.error_classifier(y)\n\n # src_mask = src_mask[:, 2:]\n\n if \"partial\" in self.args.error_schema:\n # src_mask = src_mask & (src_error != self.args.nul_index)\n tgt_mask = tgt_mask & (tgt_error != self.args.nul_index)\n # src_error_loss = self.criterion(s_src_error[src_mask], src_error[src_mask])\n tgt_error_loss = self.criterion(s_tgt_error[tgt_mask],\n tgt_error[tgt_mask])\n # return src_error_loss + tgt_error_loss\n return tgt_error_loss\n\n def decode(self, x, tgt, src_mask, tgt_mask):\n src_mask = src_mask & True\n tgt_mask = tgt_mask & True\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n\n n_shift = 1 if self.args.encoder == 'transformer' else 2\n y, mask = y[:, n_shift:], tgt_mask[:, n_shift:]\n\n s_errors = self.error_classifier(y)\n if \"partial\" in self.args.error_schema:\n s_errors[...,\n self.args.nul_index] = torch.finfo(s_errors.dtype).min\n errors = s_errors.argmax(-1)\n errors[~mask] = -1\n\n return errors" }, { "identifier": "Seq2SeqModel", "path": "gec/model.py", "snippet": "class Seq2SeqModel(Model):\n r\"\"\"\n The implementation of Semantic Role Labeling Parser using span-constrained CRF.\n\n Args:\n n_words (int):\n The size of the word vocabulary.\n n_tags (int):\n The number of POS tags, required if POS tag embeddings are used. 
Default: ``None``.\n n_chars (int):\n The number of characters, required if character-level representations are used. Default: ``None``.\n n_lemmas (int):\n The number of lemmas, required if lemma embeddings are used. Default: ``None``.\n encoder (str):\n Encoder to use.\n ``'lstm'``: BiLSTM encoder.\n ``'bert'``: BERT-like pretrained language model (for finetuning), e.g., ``'bert-base-cased'``.\n Default: ``'lstm'``.\n n_embed (int):\n The size of word embeddings. Default: 100.\n n_pretrained (int):\n The size of pretrained word embeddings. Default: 125.\n n_feat_embed (int):\n The size of feature representations. Default: 100.\n n_char_embed (int):\n The size of character embeddings serving as inputs of CharLSTM, required if using CharLSTM. Default: 50.\n n_char_hidden (int):\n The size of y states of CharLSTM, required if using CharLSTM. Default: 100.\n char_pad_index (int):\n The index of the padding token in the character vocabulary, required if using CharLSTM. Default: 0.\n elmo (str):\n Name of the pretrained ELMo registered in `ELMoEmbedding.OPTION`. Default: ``'original_5b'``.\n elmo_bos_eos (tuple[bool]):\n A tuple of two boolean values indicating whether to keep start/end boundaries of elmo outputs.\n Default: ``(True, False)``.\n bert (str):\n Specifies which kind of language model to use, e.g., ``'bert-base-cased'``.\n This is required if ``encoder='bert'`` or using BERT features. The full list can be found in `transformers`_.\n Default: ``None``.\n n_bert_layers (int):\n Specifies how many last layers to use, required if ``encoder='bert'`` or using BERT features.\n The final outputs would be weighted sum of the y states of these layers.\n Default: 4.\n mix_dropout (float):\n The dropout ratio of BERT layers, required if ``encoder='bert'`` or using BERT features. Default: .0.\n bert_pooling (str):\n Pooling way to get token embeddings.\n ``first``: take the first subtoken. ``last``: take the last subtoken. ``mean``: take a mean over all.\n Default: ``mean``.\n bert_pad_index (int):\n The index of the padding token in BERT vocabulary, required if ``encoder='bert'`` or using BERT features.\n Default: 0.\n freeze (bool):\n If ``True``, freezes BERT parameters, required if using BERT features. Default: ``True``.\n embed_dropout (float):\n The dropout ratio of input embeddings. Default: .2.\n n_encoder_hidden (int):\n The size of LSTM y states. Default: 600.\n n_encoder_layers (int):\n The number of LSTM layers. Default: 3.\n encoder_dropout (float):\n The dropout ratio of encoder layer. Default: .33.\n mlp_dropout (float):\n The dropout ratio of unary edge factor MLP layers. Default: .33.\n pad_index (int):\n The index of the padding token in the word vocabulary. Default: 0.\n unk_index (int):\n The index of the unknown token in the word vocabulary. Default: 1.\n\n .. 
_transformers:\n https://github.com/huggingface/transformers\n \"\"\"\n\n def __init__(self,\n n_words,\n n_tags=None,\n n_chars=None,\n n_lemmas=None,\n encoder='lstm',\n n_embed=100,\n n_pretrained=100,\n n_feat_embed=100,\n n_char_embed=50,\n n_char_hidden=100,\n char_pad_index=0,\n char_dropout=0,\n elmo='original_5b',\n elmo_bos_eos=(True, False),\n bert=None,\n n_bert_layers=4,\n mix_dropout=.0,\n bert_pooling='mean',\n bert_pad_index=0,\n freeze=True,\n embed_dropout=.33,\n n_encoder_hidden=512,\n n_encoder_layers=3,\n encoder_dropout=.1,\n pad_index=0,\n unk_index=1,\n **kwargs):\n super().__init__(**Config().update(locals()))\n\n if self.args.encoder == 'transformer':\n self.token_dropout = TokenDropout(self.args.token_dropout)\n self.decoder = TransformerDecoder(\n layer=TransformerDecoderLayer(\n n_heads=self.args.n_decoder_heads,\n n_model=self.args.n_decoder_hidden,\n n_inner=self.args.n_decoder_inner,\n dropout=self.args.decoder_dropout),\n n_layers=self.args.n_decoder_layers)\n\n else:\n from transformers import AutoModel\n self.model = AutoModel.from_pretrained(self.args.bart,\n dropout=self.args.dropout)\n self.encoder, self.decoder = self.model.encoder, self.model.decoder\n self.decoder_dropout = nn.Dropout(self.args.decoder_dropout)\n self.classifier = nn.Linear(self.args.n_encoder_hidden,\n self.args.n_words)\n self.classifier.weight = (self.word_embed.embed\n if self.args.encoder == 'transformer' else\n self.model.shared).weight\n self.criterion = CrossEntropyLoss(\n label_smoothing=self.args.label_smoothing)\n\n def forward(self, words):\n r\"\"\"\n Args:\n words (~torch.LongTensor): ``[batch_size, seq_len]``.\n Word indices.\n\n Returns:\n ~torch.Tensor:\n Representations for the src sentences of the shape ``[batch_size, seq_len, n_model]``.\n \"\"\"\n # we need to do token dropout, so the TranformerWordEmbedding layer is not invoked here\n if self.args.encoder == 'transformer':\n embed = self.token_dropout(self.word_embed.embed(words))\n embed = embed * self.word_embed.embed_scale + self.word_embed.pos_embed(\n embed)\n embed = self.embed_dropout(embed)\n return self.encoder(embed, words.ne(self.args.pad_index))\n else:\n return self.encoder(input_ids=words,\n attention_mask=words.ne(\n self.args.pad_index))[0]\n\n def loss(self, x, tgt, src_mask, tgt_mask):\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n y = self.decoder_dropout(y)\n s_y = self.classifier(y)\n return self.criterion(s_y[tgt_mask], tgt[tgt_mask])\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (tuple(\n past_state.index_select(0, beam_idx)\n for past_state in layer_past), )\n return reordered_past\n\n def decode(self, x, src_mask):\n batch_size, *_ = x.shape\n beam_size, n_words = self.args.beam_size, self.args.n_words\n\n # repeat the src inputs beam_size times\n # [batch_size * beam_size, ...]\n x = x.unsqueeze(1).repeat(1, 
beam_size, 1, 1).view(-1, *x.shape[1:])\n src_mask = src_mask.unsqueeze(1).repeat(1, beam_size, 1).view(\n -1, *src_mask.shape[1:])\n # initialize the tgt inputs by <bos>\n # [batch_size * beam_size, seq_len]\n tgt = x.new_full((batch_size * beam_size, 1),\n self.args.bos_index,\n dtype=torch.long)\n # [batch_size * beam_size]\n active = src_mask.new_ones(batch_size * beam_size)\n # [batch_size]\n batches = tgt.new_tensor(range(batch_size)) * beam_size\n # accumulated scores\n scores = x.new_full((batch_size, self.args.beam_size),\n MIN).index_fill_(-1, tgt.new_tensor(0), 0).view(-1)\n\n def rank(scores, mask, k):\n scores = scores / mask.sum(-1).unsqueeze(\n -1)**self.args.length_penalty\n return scores.view(batch_size, -1).topk(k, -1)[1]\n\n if self.args.encoder != 'transformer':\n past_key_values = self.decoder(\n input_ids=torch.full_like(tgt[:, :1], self.args.eos_index),\n attention_mask=torch.ones_like(src_mask[:, :1]),\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask,\n past_key_values=None,\n use_cache=True)[1]\n\n for t in range(1, min(self.args.max_len + 1, int(1.8 * x.shape[1]))):\n tgt_mask = tgt.ne(self.args.pad_index)\n if self.args.encoder == 'transformer':\n attn_mask = tgt_mask.new_ones(t, t).tril_()\n s_y = self.decoder(self.embed(tgt[active]), x[active],\n tgt_mask[active], src_mask[active],\n attn_mask)\n # [n_active, n_words]\n s_y = self.classifier(s_y[:, -1]).log_softmax(-1)\n # only allow finished sequences to get <pad>\n # [batch_size * beam_size, n_words]\n s_y = x.new_full((batch_size * beam_size, n_words),\n MIN).masked_scatter_(active.unsqueeze(-1),\n s_y)\n else:\n input_ids = tgt[:, -1:]\n s_y, new_past_key_values = self.decoder(\n input_ids=input_ids,\n attention_mask=torch.cat(\n (torch.ones_like(tgt_mask[:, :1]), tgt_mask), 1),\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask,\n past_key_values=past_key_values,\n use_cache=True)[:2]\n del past_key_values\n past_key_values = new_past_key_values\n # [n_active, n_words]\n s_y = self.classifier(s_y[:, -1]).log_softmax(-1)\n # only allow finished sequences to get <pad>\n s_y[~active] = MIN\n\n s_y[~active, self.args.pad_index] = 0\n\n # [batch_size * beam_size, n_words]\n scores = scores.unsqueeze(-1) + s_y\n # [batch_size, beam_size]\n cands = rank(scores, tgt_mask, beam_size)\n # [batch_size * beam_size]\n scores = scores.view(batch_size, -1).gather(-1, cands).view(-1)\n # beams, tokens = cands // n_words, cands % n_words\n beams, tokens = cands.div(\n n_words, rounding_mode='floor'), (cands % n_words).view(-1, 1)\n indices = (batches.unsqueeze(-1) + beams).view(-1)\n # [batch_size * beam_size, seq_len + 1]\n tgt = torch.cat((tgt[indices], tokens), 1)\n past_key_values = self._reorder_cache(past_key_values, indices)\n active = tokens.ne(\n tokens.new_tensor(\n (self.args.eos_index, self.args.pad_index))).all(-1)\n\n if not active.any():\n break\n cands = rank(scores.view(-1, 1), tgt.ne(self.args.pad_index),\n self.args.topk)\n return tgt[(batches.unsqueeze(-1) + cands).view(-1)].view(\n batch_size, self.args.topk, -1)" }, { "identifier": "Field", "path": "gec/transform.py", "snippet": "class Field(supar.utils.Field):\n r\"\"\"\n Defines a datatype together with instructions for converting to :class:`~torch.Tensor`.\n :class:`Field` models common text processing datatypes that can be represented by tensors.\n It holds a :class:`~supar.utils.vocab.Vocab` object that defines the set of possible values\n for elements of the field and their corresponding numerical representations.\n The 
:class:`Field` object also holds other parameters relating to how a datatype\n should be numericalized, such as a tokenization method.\n\n Args:\n name (str):\n The name of the field.\n pad_token (str):\n The string token used as padding. Default: ``None``.\n unk_token (str):\n The string token used to represent OOV words. Default: ``None``.\n bos_token (str):\n A token that will be prepended to every example using this field, or ``None`` for no `bos_token`.\n Default: ``None``.\n eos_token (str):\n A token that will be appended to every example using this field, or ``None`` for no `eos_token`.\n lower (bool):\n Whether to lowercase the text in this field. Default: ``False``.\n use_vocab (bool):\n Whether to use a :class:`~supar.utils.vocab.Vocab` object.\n If ``False``, the data in this field should already be numerical.\n Default: ``True``.\n tokenize (function):\n The function used to tokenize strings using this field into sequential examples. Default: ``None``.\n fn (function):\n The function used for preprocessing the examples. Default: ``None``.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.padding_side = kwargs.pop('padding_side') if 'padding_side' in kwargs else 'right'\n super().__init__(*args, **kwargs)\n\n def compose(self, batch: Iterable[torch.Tensor]) -> torch.Tensor:\n r\"\"\"\n Composes a batch of sequences into a padded tensor.\n\n Args:\n batch (Iterable[~torch.Tensor]):\n A list of tensors.\n\n Returns:\n A padded tensor converted to proper device.\n \"\"\"\n\n return pad(batch, self.pad_index, padding_side=self.padding_side).to(self.device, non_blocking=True)" }, { "identifier": "Text", "path": "gec/transform.py", "snippet": "class Text(Transform):\n\n fields = ['SRC', 'TGT']\n\n def __init__(\n self,\n SRC: Optional[Union[Field, Iterable[Field]]] = None,\n TGT: Optional[Union[Field, Iterable[Field]]] = None\n ) -> Text:\n super().__init__()\n\n self.SRC = SRC\n self.TGT = TGT\n\n @property\n def src(self):\n return self.SRC,\n\n @property\n def tgt(self):\n return self.TGT,\n\n def load(\n self,\n data: Union[str, Iterable],\n lang: Optional[str] = None,\n **kwargs\n ) -> Iterable[TextSentence]:\n r\"\"\"\n Loads the data in Text-X format.\n Also supports for loading data from Text-U file with comments and non-integer IDs.\n\n Args:\n data (str or Iterable):\n A filename or a list of instances.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n\n Returns:\n A list of :class:`TextSentence` instances.\n \"\"\"\n\n if lang is not None:\n tokenizer = Tokenizer(lang)\n if isinstance(data, str) and os.path.exists(data):\n f = open(data)\n if data.endswith('.txt'):\n lines = (i\n for s in f\n if len(s) > 1\n for i in StringIO((s.split() if lang is None else tokenizer(s)) + '\\n'))\n else:\n lines = f\n else:\n if lang is not None:\n data = [tokenizer(s) for s in ([data] if isinstance(data, str) else data)]\n else:\n data = [data] if isinstance(data[0], str) else data\n lines = (i for s in data for i in StringIO(s + '\\n'))\n\n index, sentence = 0, []\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n sentence = TextSentence(self, sentence, index)\n yield sentence\n index += 1\n sentence = []\n else:\n sentence.append(line)" }, { "identifier": "Tree", "path": "gec/transform.py", "snippet": "class Tree(Transform):\n\n fields = ['SRC', 'TGT', 'SRCERROR', 'TGTERROR']\n\n def __init__(\n self,\n SRC: Optional[Union[Field, 
Iterable[Field]]] = None,\n TGT: Optional[Union[Field, Iterable[Field]]] = None,\n SRCERROR: Optional[Union[Field, Iterable[Field]]] = None,\n TGTERROR: Optional[Union[Field, Iterable[Field]]] = None,\n **kwargs\n ) -> Tree:\n super().__init__()\n self.error_schema = kwargs.pop('error_schema') if 'error_schema' in kwargs else 'last'\n self.fine_error_type = kwargs.pop('fine_error_type') if 'fine_error_type' in kwargs else False\n\n self.SRC = SRC\n self.TGT = TGT\n self.SRCERROR = SRCERROR\n self.TGTERROR = TGTERROR\n\n @property\n def src(self):\n return self.SRC, self.TGT\n\n @property\n def tgt(self):\n return self.SRCERROR, self.TGTERROR\n\n def load(\n self,\n data: Union[str, Iterable],\n lang: Optional[str] = None,\n **kwargs\n ) -> Iterable[TextSentence]:\n r\"\"\"\n Loads the data in Text-X format.\n Also supports for loading data from Text-U file with comments and non-integer IDs.\n\n Args:\n data (Union[str, Iterable]):\n A filename or a list of instances.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n\n Returns:\n A list of :class:`TextSentence` instances.\n \"\"\"\n\n if lang is not None:\n tokenizer = Tokenizer(lang)\n if isinstance(data, str) and os.path.exists(data):\n f = open(data)\n if data.endswith('.txt'):\n lines = (i\n for s in f\n if len(s) > 1\n for i in StringIO((s.split() if lang is None else tokenizer(s)) + '\\n'))\n else:\n lines = f\n else:\n if lang is not None:\n data = [tokenizer(s) for s in ([data] if isinstance(data, str) else data)]\n else:\n data = [data] if isinstance(data[0], str) else data\n lines = (i for s in data for i in StringIO(s + '\\n'))\n\n def consume(lines, chunksize=10000):\n index, sentence, chunk = 0, [], []\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n chunk.append((sentence, index))\n if len(chunk) == chunksize:\n yield chunk\n chunk = []\n index += 1\n sentence = []\n else:\n sentence.append(line)\n if len(chunk) > 0:\n yield chunk\n\n @contextmanager\n def cache(lines):\n global global_transform\n global_transform = self\n ftemp = tempfile.mkdtemp()\n fbin = os.path.join(ftemp, 'data')\n try:\n yield ((chunk, f\"{fbin}.{i}\") for i, chunk in enumerate(consume(lines))), fbin\n finally:\n if dist.is_initialized() and not is_master():\n dist.barrier()\n del global_transform\n shutil.rmtree(ftemp)\n\n with cache(lines) as (chunks, fbin):\n if is_master():\n def process(chunk, fb):\n sentences = [TreeSentence(global_transform, *s) for s in progress_bar(chunk)]\n sentences = [s for s in sentences if s.vaild]\n return binarize({'sentences': sentences}, fb)[0]\n with mp.Pool(32) as pool:\n results = [pool.apply_async(process, (chunk, fb)) for chunk, fb in chunks]\n binarize((r.get() for r in results), fbin, merge=True)\n if dist.is_initialized() and not is_master():\n fbin = gather(fbin)[0]\n dist.barrier()\n for s in debinarize(fbin, meta=True)['sentences']:\n yield debinarize(fbin, s)" } ]
import os
import shutil
import tempfile
import math
import dill
import torch
import torch.distributed as dist
from datetime import datetime, timedelta
from typing import Iterable, Union
from gec.data import Dataset
from gec.fn import map_token_ids
from supar.parser import Parser
from supar.utils import Config
from supar.utils.common import MIN, NUL, UNK
from supar.utils.field import RawField
from supar.utils.fn import set_rng_state
from supar.utils.logging import get_logger, init_logger, progress_bar
from supar.utils.metric import Metric
from supar.utils.optim import PolynomialLR
from supar.utils.parallel import DistributedDataParallel as DDP, gather, is_dist
from supar.utils.parallel import is_master
from supar.utils.tokenizer import TransformerTokenizer
from supar.utils.transform import AttachJuxtaposeTree, Batch
from torch.cuda.amp import GradScaler
from torch.optim import AdamW
from torch.optim.lr_scheduler import ExponentialLR
from torch.nn.functional import embedding
from .metric import PerplexityMetric, SpanMetric
from .model import Seq2SeqDetectModel, Seq2SeqModel
from .transform import Field, Text, Tree
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook
from transformers import AutoTokenizer, GPT2LMHeadModel
14,108
self.epoch += 1 self.patience -= 1 self.elapsed += t if metric > self.best_metric: self.best_e, self.patience, self.best_metric = epoch, patience, metric if is_master(): self.save_checkpoint(args.path) logger.info(f"{t}s elapsed (saved)\n") else: logger.info(f"{t}s elapsed\n") if self.patience < 1: break if dist.is_initialized(): dist.barrier() best = self.load(**args) # only allow the master device to save models if is_master(): best.save(args.path) logger.info(f"Epoch {self.best_e} saved") logger.info(f"{'dev:':5} {self.best_metric}") if args.test: best.model.eval() with best.join(): test_metric = sum( [best.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {best.reduce(test_metric)}") logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch") def evaluate(self, data: Union[str, Iterable], batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, punct: bool = False, tree: bool = True, proj: bool = False, partial: bool = False, verbose: bool = True, **kwargs): return super().evaluate(**Config().update(locals())) def predict(self, data: Union[str, Iterable], pred: str = None, lang: str = None, prob: bool = False, batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, tree: bool = True, proj: bool = False, verbose: bool = True, **kwargs): return super().predict(**Config().update(locals())) def train_step(self, batch: Batch) -> torch.Tensor: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) return loss @torch.no_grad() def eval_step(self, batch: Batch) -> PerplexityMetric: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) preds = golds = None if self.args.eval_tgt: golds = [(s.values[0], s.values[1]) for s in batch.sentences] preds = [(s.values[0], self.TGT.tokenize.decode(i[0])) for s, i in zip(batch.sentences, self.model.decode(x, batch.mask).tolist()) ] return PerplexityMetric(loss, preds, golds, tgt_mask, not self.args.eval_tgt) @torch.no_grad() def pred_step(self, batch: Batch) -> Batch: src, = batch x = self.model(src) tgt = self.model.decode(x, batch.mask) batch.tgt = [[self.TGT.tokenize.decode(cand) for cand in i] for i in tgt.tolist()] return batch @classmethod def build(cls, path, min_freq=2, fix_len=20, **kwargs): r""" Build a brand-new Parser, including initialization of all data fields and model parameters. Args: path (str): The path of the model to be saved. min_freq (str): The minimum frequency needed to include a token in the vocabulary. Default: 2. fix_len (int): The max length of all subword pieces. The excess part of each piece will be truncated. Required if using CharLSTM/BERT. Default: 20. kwargs (dict): A dict holding the unconsumed arguments. """ args = Config(**locals()) os.makedirs(os.path.dirname(path) or './', exist_ok=True) if os.path.exists(path) and not args.build: return cls.load(**args) logger.info("Building the fields") t = TransformerTokenizer(name=args.bart)
# -*- coding: utf-8 -*- logger = get_logger(__name__) class Seq2SeqParser(Parser): NAME = 'seq2seq' MODEL = Seq2SeqModel def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.SRC = self.transform.SRC self.TGT = self.transform.TGT def train(self, train: Union[str, Iterable], dev: Union[str, Iterable], test: Union[str, Iterable], epochs: int, patience: int, batch_size: int = 5000, update_steps: int = 1, buckets: int = 32, workers: int = 0, clip: float = 5.0, amp: bool = False, cache: bool = False, verbose: bool = True, **kwargs) -> None: args = self.args.update(locals()) init_logger(logger, verbose=args.verbose) self.transform.train() batch_size = batch_size // update_steps if dist.is_initialized(): batch_size = batch_size // dist.get_world_size() logger.info("Loading the data") if args.cache: args.bin = os.path.join(os.path.dirname(args.path), 'bin') train = Dataset(self.transform, args.train, **args).build(batch_size, buckets, True, dist.is_initialized(), workers, chunk_size=args.chunk_size, seed=args.seed) dev = Dataset(self.transform, args.dev, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'train:':6} {train}") if not args.test: logger.info(f"{'dev:':6} {dev}\n") else: test = Dataset(self.transform, args.test, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'dev:':6} {dev}") logger.info(f"{'test:':6} {test}\n") self.optimizer = AdamW(self.model.parameters(), args.lr, (args.mu, args.nu), args.eps, args.weight_decay) steps = len(train.loader) * epochs // args.update_steps self.scheduler = PolynomialLR(self.optimizer, warmup_steps=self.args.warmup_steps, steps=steps) self.scaler = GradScaler(enabled=args.amp) if dist.is_initialized(): self.model = DDP(self.model, device_ids=[args.local_rank], find_unused_parameters=args.get( 'find_unused_parameters', True)) if args.amp: self.model.register_comm_hook(dist.group.WORLD, fp16_compress_hook) self.step, self.epoch, self.best_e, self.patience, self.n_batches = 1, 1, 1, patience, len( train.loader) self.best_metric, self.elapsed = Metric(), timedelta() if self.args.checkpoint: try: self.optimizer.load_state_dict( self.checkpoint_state_dict.pop('optimizer_state_dict')) self.scheduler.load_state_dict( self.checkpoint_state_dict.pop('scheduler_state_dict')) self.scaler.load_state_dict( self.checkpoint_state_dict.pop('scaler_state_dict')) set_rng_state(self.checkpoint_state_dict.pop('rng_state')) for k, v in self.checkpoint_state_dict.items(): setattr(self, k, v) train.loader.batch_sampler.epoch = self.epoch except AttributeError: logger.warning( "No checkpoint found. 
Try re-launching the traing procedure instead" ) for epoch in range(self.epoch, args.epochs + 1): start = datetime.now() bar, metric = progress_bar(train.loader), Metric() logger.info(f"Epoch {epoch} / {args.epochs}:") self.model.train() if self.epoch == 1: torch.cuda.empty_cache() with self.join(): # we should zero `step` as the number of batches in different processes is not necessarily equal self.step = 0 for batch in bar: with self.sync(): with torch.autocast(self.device, enabled=self.args.amp): loss = self.train_step(batch) self.backward(loss) if self.sync_grad: self.clip_grad_norm_(self.model.parameters(), self.args.clip) self.scaler.step(self.optimizer) self.scaler.update() self.scheduler.step() self.optimizer.zero_grad(True) bar.set_postfix_str( f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f}" ) self.step += 1 logger.info(f"{bar.postfix}") self.model.eval() with self.join(), torch.autocast(self.device, enabled=self.args.amp): metric = self.reduce( sum([self.eval_step(i) for i in progress_bar(dev.loader)], Metric())) logger.info(f"{'dev:':5} {metric}") if args.test: test_metric = sum( [self.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {self.reduce(test_metric)}") t = datetime.now() - start self.epoch += 1 self.patience -= 1 self.elapsed += t if metric > self.best_metric: self.best_e, self.patience, self.best_metric = epoch, patience, metric if is_master(): self.save_checkpoint(args.path) logger.info(f"{t}s elapsed (saved)\n") else: logger.info(f"{t}s elapsed\n") if self.patience < 1: break if dist.is_initialized(): dist.barrier() best = self.load(**args) # only allow the master device to save models if is_master(): best.save(args.path) logger.info(f"Epoch {self.best_e} saved") logger.info(f"{'dev:':5} {self.best_metric}") if args.test: best.model.eval() with best.join(): test_metric = sum( [best.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {best.reduce(test_metric)}") logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch") def evaluate(self, data: Union[str, Iterable], batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, punct: bool = False, tree: bool = True, proj: bool = False, partial: bool = False, verbose: bool = True, **kwargs): return super().evaluate(**Config().update(locals())) def predict(self, data: Union[str, Iterable], pred: str = None, lang: str = None, prob: bool = False, batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, tree: bool = True, proj: bool = False, verbose: bool = True, **kwargs): return super().predict(**Config().update(locals())) def train_step(self, batch: Batch) -> torch.Tensor: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) return loss @torch.no_grad() def eval_step(self, batch: Batch) -> PerplexityMetric: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) preds = golds = None if self.args.eval_tgt: golds = [(s.values[0], s.values[1]) for s in batch.sentences] preds = [(s.values[0], self.TGT.tokenize.decode(i[0])) for s, i in zip(batch.sentences, self.model.decode(x, batch.mask).tolist()) ] return PerplexityMetric(loss, preds, golds, tgt_mask, not self.args.eval_tgt) @torch.no_grad() def pred_step(self, batch: Batch) -> Batch: src, = batch x = 
self.model(src) tgt = self.model.decode(x, batch.mask) batch.tgt = [[self.TGT.tokenize.decode(cand) for cand in i] for i in tgt.tolist()] return batch @classmethod def build(cls, path, min_freq=2, fix_len=20, **kwargs): r""" Build a brand-new Parser, including initialization of all data fields and model parameters. Args: path (str): The path of the model to be saved. min_freq (str): The minimum frequency needed to include a token in the vocabulary. Default: 2. fix_len (int): The max length of all subword pieces. The excess part of each piece will be truncated. Required if using CharLSTM/BERT. Default: 20. kwargs (dict): A dict holding the unconsumed arguments. """ args = Config(**locals()) os.makedirs(os.path.dirname(path) or './', exist_ok=True) if os.path.exists(path) and not args.build: return cls.load(**args) logger.info("Building the fields") t = TransformerTokenizer(name=args.bart)
SRC = Field('src',
6
2023-10-18 10:55:33+00:00
16k
jianlanluo/SAQ
vqn/conservative_sac_main.py
[ { "identifier": "VQN", "path": "vqn/vqn.py", "snippet": "class VQN(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.embedding_dim = 128\n config.codebook_size = 64\n config.commitment_cost = 1.0\n config.quantization_cost = 1.0\n config.entropy_loss_ratio = 0.0\n config.entropy_loss_type = \"softmax\"\n config.entropy_temperature = 1.0\n config.vqvae_arch = '512-512'\n config.action_only_quantization = False\n config.reconstruction_loss_type = 'l2'\n config.vqvae_lr = 3e-4\n\n config.discount = 0.99\n config.qf_arch = '512-512'\n config.qf_lr = 3e-4\n config.target_update_period = 200\n config.reset_qf = False\n config.td_loss_weight = 1.0\n\n config.bc_loss_weight = 0.0\n\n config.action_selection_threshold = 0.0\n\n config.cql_temp = 1.0\n config.cql_min_q_weight = 0.0\n \n config.qf_weight_decay = 0.0\n\n config.q_value_penalty_weight = 0.0\n config.q_value_penalty_type = 'l1'\n config.q_value_penalty_aggregation = 'mean'\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, observation_dim, action_dim):\n self.config = self.get_default_config(config)\n self.observation_dim = observation_dim\n self.action_dim = action_dim\n\n self.vqvae = ActionVQVAE(\n observation_dim=self.observation_dim,\n action_dim=self.action_dim,\n embedding_dim=self.config.embedding_dim,\n codebook_size=self.config.codebook_size,\n commitment_cost=self.config.commitment_cost,\n quantization_cost=self.config.quantization_cost,\n entropy_loss_ratio=self.config.entropy_loss_ratio,\n entropy_loss_type=self.config.entropy_loss_type,\n entropy_temperature=self.config.entropy_temperature,\n arch=self.config.vqvae_arch,\n action_only_quantization=self.config.action_only_quantization,\n reconstruction_loss_type=self.config.reconstruction_loss_type,\n )\n\n self._vqvae_train_state = TrainState.create(\n params=self.vqvae.init(\n next_rng(self.vqvae.rng_keys()),\n jnp.zeros((1, observation_dim)),\n jnp.zeros((1, action_dim)),\n train=True\n ),\n tx=optax.adam(self.config.vqvae_lr),\n apply_fn=None,\n )\n self._vqvae_total_steps = 0\n\n self.qf = FullyConnectedNetwork(\n output_dim=self.config.codebook_size,\n arch=self.config.qf_arch,\n )\n\n qf_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((1, observation_dim)),\n )\n\n self._qf_optimizer = optax.adam(self.config.qf_lr)\n self._qf_train_state = DQNTrainState.create(\n params=qf_params,\n target_params=deepcopy(qf_params),\n tx=optax.adamw(self.config.qf_lr, self.config.qf_weight_decay),\n apply_fn=None,\n )\n self._dqn_total_steps = 0\n\n self._sampler_policy = VQSamplerPolicy(\n self.qf, self.vqvae,\n self._qf_train_state.params, self._vqvae_train_state.params\n )\n\n\n def train_vqvae(self, batch):\n self._vqvae_train_state, metrics = self._vqvae_train_step(\n next_rng(), self._vqvae_train_state, batch\n )\n self._vqvae_total_steps += 1\n return metrics\n\n @partial(jax.jit, static_argnames=('self', ))\n def _vqvae_train_step(self, rng, train_state, batch):\n observations = batch['observations']\n actions = batch['actions']\n rng_generator = JaxRNG(rng)\n\n @partial(jax.grad, has_aux=True)\n def grad_fn(train_params):\n reconstructed, result_dict = self.vqvae.apply(\n train_params,\n observations,\n actions,\n train=True,\n )\n return result_dict['loss'], result_dict\n\n grads, aux_values = grad_fn(train_state.params)\n new_train_state = train_state.apply_gradients(grads=grads)\n metrics = 
collect_jax_metrics(\n aux_values,\n ['loss', 'reconstruction_loss', 'quantizer_loss', 'e_latent_loss', 'q_latent_loss',\n 'entropy_loss', 'action_prior_loss', 'action_prior_accuracy'],\n )\n return new_train_state, metrics\n\n def train_dqn(self, batch, bc=False):\n self._qf_train_state, metrics = self._dqn_train_step(\n next_rng(), self._qf_train_state, self._vqvae_train_state, batch,\n bc\n )\n self._dqn_total_steps += 1\n return metrics\n\n @partial(jax.jit, static_argnames=('self', 'bc'))\n def _dqn_train_step(self, rng, qf_train_state, vqvae_train_state, batch, bc=False):\n observations = batch['observations']\n original_actions = batch['actions']\n rewards = batch['rewards']\n next_observations = batch['next_observations']\n dones = batch['dones']\n rng_generator = JaxRNG(rng)\n\n actions = self.vqvae.apply(\n vqvae_train_state.params,\n observations,\n original_actions,\n method=self.vqvae.encode\n )\n\n @partial(jax.grad, has_aux=True)\n def grad_fn(train_params):\n def select_by_action(q_vals, actions):\n return jnp.squeeze(\n jnp.take_along_axis(\n q_vals, jnp.expand_dims(actions, -1), axis=-1\n ),\n axis=-1\n )\n\n def select_actions(params, observations):\n q_values = self.qf.apply(params, observations)\n action_priors = jax.nn.softmax(\n self.vqvae.apply(\n vqvae_train_state.params,\n observations,\n method=self.vqvae.action_prior_logits\n ),\n axis=-1\n )\n action_selection_threshold = jnp.minimum(\n jnp.amax(action_priors, axis=-1, keepdims=True),\n self.config.action_selection_threshold\n )\n action_mask = (\n action_priors >= action_selection_threshold\n ).astype(jnp.float32)\n masked_q_values = (\n action_mask * q_values + (1.0 - action_mask) * jnp.min(q_values)\n )\n return jnp.argmax(masked_q_values, axis=-1)\n\n\n q_values = self.qf.apply(train_params, observations)\n current_actions_q_values = select_by_action(q_values, actions)\n next_q_values = self.qf.apply(qf_train_state.target_params, next_observations)\n next_actions = select_actions(train_params, next_observations)\n target_q_values = select_by_action(next_q_values, next_actions)\n\n td_target = rewards + (1. 
- dones) * self.config.discount * target_q_values\n\n td_loss = mse_loss(current_actions_q_values, jax.lax.stop_gradient(td_target))\n loss = self.config.td_loss_weight * td_loss\n\n current_actions = jnp.argmax(q_values, axis=-1)\n max_q_values = jnp.max(q_values, axis=-1)\n advantage = max_q_values - current_actions_q_values\n\n policy_dataset_aggrement_rate = jnp.mean(current_actions == actions)\n reconstructed_current_actions = self.vqvae.apply(\n vqvae_train_state.params,\n observations,\n current_actions,\n method=self.vqvae.decode\n )\n current_action_mse = jnp.sum(\n jnp.square(reconstructed_current_actions - original_actions),\n axis=-1\n ).mean()\n\n bc_loss = jnp.mean(optax.softmax_cross_entropy_with_integer_labels(q_values, actions))\n loss = loss + self.config.bc_loss_weight * bc_loss\n\n cql_lse_q_values = self.config.cql_temp * jax.scipy.special.logsumexp(\n q_values / self.config.cql_temp, axis=-1\n )\n cql_min_q_loss = jnp.mean(cql_lse_q_values - current_actions_q_values)\n loss = loss + self.config.cql_min_q_weight * cql_min_q_loss\n\n if self.config.q_value_penalty_aggregation == 'none':\n aggregated_q_values = q_values\n elif self.config.q_value_penalty_aggregation == 'mean':\n aggregated_q_values = jnp.mean(q_values)\n else:\n raise ValueError('Unsupport value penalty aggregation type!')\n\n if self.config.q_value_penalty_type == 'l1':\n q_value_penalty_loss = jnp.mean(jnp.abs(aggregated_q_values))\n elif self.config.q_value_penalty_type == 'l2':\n q_value_penalty_loss = jnp.mean(jnp.square(aggregated_q_values))\n else:\n raise ValueError('Unsupport value penalty type!')\n\n loss = loss + self.config.q_value_penalty_weight * q_value_penalty_loss\n\n if bc:\n loss = bc_loss\n\n return loss, locals()\n\n grads, aux_values = grad_fn(qf_train_state.params)\n new_target_params = jax.lax.cond(\n qf_train_state.step % self.config.target_update_period == self.config.target_update_period - 1,\n lambda: qf_train_state.params,\n lambda: qf_train_state.target_params,\n )\n if self.config.reset_qf:\n def reset_qf_params():\n qf_params = self.qf.init(\n rng_generator(self.qf.rng_keys()),\n jnp.zeros((1, self.observation_dim)),\n )\n return DQNTrainState.create(\n params=qf_params,\n target_params=new_target_params,\n tx=self._qf_optimizer,\n apply_fn=None,\n )\n\n new_qf_train_state = jax.lax.cond(\n qf_train_state.step % self.config.target_update_period == self.config.target_update_period - 1,\n reset_qf_params,\n lambda: qf_train_state.apply_gradients(grads=grads, target_params=new_target_params)\n )\n else:\n new_qf_train_state = qf_train_state.apply_gradients(\n grads=grads, target_params=new_target_params\n )\n\n metrics = collect_jax_metrics(\n aux_values,\n ['loss', 'current_actions_q_values', 'max_q_values', 'target_q_values',\n 'advantage', 'td_target', 'td_loss', 'cql_lse_q_values', 'cql_min_q_loss',\n 'policy_dataset_aggrement_rate', 'bc_loss', 'current_action_mse',\n 'q_value_penalty_loss'],\n )\n\n return new_qf_train_state, metrics\n\n def get_sampler_policy(self):\n return self._sampler_policy.update_params(\n self._qf_train_state.params, self._vqvae_train_state.params\n )" }, { "identifier": "ConservativeSAC", "path": "vqn/conservative_sac.py", "snippet": "class ConservativeSAC(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.discount = 0.99\n config.alpha_multiplier = 0.0\n config.use_automatic_entropy_tuning = False\n config.backup_entropy = False\n config.target_entropy = 0.0\n config.policy_lr = 3e-4\n 
config.policy_weight_decay = 0.0\n config.qf_lr = 3e-4\n config.qf_weight_decay = 0.0\n config.optimizer_type = 'adam'\n config.soft_target_update_rate = 5e-3\n config.use_cql = False\n config.cql_n_actions = 10\n config.cql_importance_sample = True\n config.cql_lagrange = False\n config.cql_target_action_gap = 1.0\n config.cql_temp = 1.0\n config.cql_min_q_weight = 5.0\n config.cql_max_target_backup = False\n config.cql_clip_diff_min = -np.inf\n config.cql_clip_diff_max = np.inf\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, policy, qf):\n self.config = self.get_default_config(config)\n self.policy = policy\n self.qf = qf\n self.observation_dim = policy.observation_dim\n self.action_dim = policy.action_dim\n\n self._train_states = {}\n\n optimizer_class = {\n 'adam': optax.adam,\n 'sgd': optax.sgd,\n }[self.config.optimizer_type]\n\n policy_params = self.policy.init(\n next_rng(self.policy.rng_keys()),\n jnp.zeros((10, self.observation_dim))\n )\n self._train_states['policy'] = TrainState.create(\n params=policy_params,\n tx=optax.adamw(self.config.qf_lr, self.config.policy_weight_decay),\n apply_fn=None\n )\n\n qf1_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf1'] = TrainState.create(\n params=qf1_params,\n tx=optax.adamw(self.config.qf_lr, self.config.qf_weight_decay),\n apply_fn=None,\n )\n qf2_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf2'] = TrainState.create(\n params=qf2_params,\n tx=optax.adamw(self.config.qf_lr, self.config.qf_weight_decay),\n apply_fn=None,\n )\n self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params})\n\n model_keys = ['policy', 'qf1', 'qf2']\n\n if self.config.use_automatic_entropy_tuning:\n self.log_alpha = Scalar(0.0)\n self._train_states['log_alpha'] = TrainState.create(\n params=self.log_alpha.init(next_rng()),\n tx=optimizer_class(self.config.policy_lr),\n apply_fn=None\n )\n model_keys.append('log_alpha')\n\n if self.config.cql_lagrange:\n self.log_alpha_prime = Scalar(1.0)\n self._train_states['log_alpha_prime'] = TrainState.create(\n params=self.log_alpha_prime.init(next_rng()),\n tx=optimizer_class(self.config.qf_lr),\n apply_fn=None\n )\n model_keys.append('log_alpha_prime')\n\n self._model_keys = tuple(model_keys)\n self._total_steps = 0\n\n def train(self, batch, bc=False):\n self._total_steps += 1\n self._train_states, self._target_qf_params, metrics = self._train_step(\n self._train_states, self._target_qf_params, next_rng(), batch, bc\n )\n return metrics\n\n @partial(jax.jit, static_argnames=('self', 'bc'))\n def _train_step(self, train_states, target_qf_params, rng, batch, bc=False):\n rng_generator = JaxRNG(rng)\n\n def loss_fn(train_params):\n observations = batch['observations']\n actions = batch['actions']\n rewards = batch['rewards']\n next_observations = batch['next_observations']\n dones = batch['dones']\n\n loss_collection = {}\n\n @wrap_function_with_rng(rng_generator())\n def forward_policy(rng, *args, **kwargs):\n return self.policy.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.policy.rng_keys())\n )\n\n @wrap_function_with_rng(rng_generator())\n def forward_qf(rng, *args, **kwargs):\n return self.qf.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.qf.rng_keys())\n )\n\n new_actions, log_pi = 
forward_policy(train_params['policy'], observations)\n\n if self.config.use_automatic_entropy_tuning:\n alpha_loss = -self.log_alpha.apply(train_params['log_alpha']) * (log_pi + self.config.target_entropy).mean()\n loss_collection['log_alpha'] = alpha_loss\n alpha = jnp.exp(self.log_alpha.apply(train_params['log_alpha'])) * self.config.alpha_multiplier\n else:\n alpha_loss = 0.0\n alpha = self.config.alpha_multiplier\n\n \"\"\" Policy loss \"\"\"\n if bc:\n log_probs = forward_policy(train_params['policy'], observations, actions, method=self.policy.log_prob)\n policy_loss = (alpha*log_pi - log_probs).mean()\n else:\n q_new_actions = jnp.minimum(\n forward_qf(train_params['qf1'], observations, new_actions),\n forward_qf(train_params['qf2'], observations, new_actions),\n )\n policy_loss = (alpha*log_pi - q_new_actions).mean()\n\n loss_collection['policy'] = policy_loss\n\n \"\"\" Q function loss \"\"\"\n q1_pred = forward_qf(train_params['qf1'], observations, actions)\n q2_pred = forward_qf(train_params['qf2'], observations, actions)\n\n if self.config.cql_max_target_backup:\n new_next_actions, next_log_pi = forward_policy(\n train_params['policy'], next_observations, repeat=self.config.cql_n_actions\n )\n target_q_values = jnp.minimum(\n forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),\n forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),\n )\n max_target_indices = jnp.expand_dims(jnp.argmax(target_q_values, axis=-1), axis=-1)\n target_q_values = jnp.take_along_axis(target_q_values, max_target_indices, axis=-1).squeeze(-1)\n next_log_pi = jnp.take_along_axis(next_log_pi, max_target_indices, axis=-1).squeeze(-1)\n else:\n new_next_actions, next_log_pi = forward_policy(\n train_params['policy'], next_observations\n )\n target_q_values = jnp.minimum(\n forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),\n forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),\n )\n\n if self.config.backup_entropy:\n target_q_values = target_q_values - alpha * next_log_pi\n\n td_target = jax.lax.stop_gradient(\n rewards + (1. 
- dones) * self.config.discount * target_q_values\n )\n qf1_loss = mse_loss(q1_pred, td_target)\n qf2_loss = mse_loss(q2_pred, td_target)\n\n ### CQL\n if self.config.use_cql:\n batch_size = actions.shape[0]\n cql_random_actions = jax.random.uniform(\n rng_generator(), shape=(batch_size, self.config.cql_n_actions, self.action_dim),\n minval=-1.0, maxval=1.0\n )\n\n cql_current_actions, cql_current_log_pis = forward_policy(\n train_params['policy'], observations, repeat=self.config.cql_n_actions,\n )\n cql_next_actions, cql_next_log_pis = forward_policy(\n train_params['policy'], next_observations, repeat=self.config.cql_n_actions,\n )\n\n cql_q1_rand = forward_qf(train_params['qf1'], observations, cql_random_actions)\n cql_q2_rand = forward_qf(train_params['qf2'], observations, cql_random_actions)\n cql_q1_current_actions = forward_qf(train_params['qf1'], observations, cql_current_actions)\n cql_q2_current_actions = forward_qf(train_params['qf2'], observations, cql_current_actions)\n cql_q1_next_actions = forward_qf(train_params['qf1'], observations, cql_next_actions)\n cql_q2_next_actions = forward_qf(train_params['qf2'], observations, cql_next_actions)\n\n cql_cat_q1 = jnp.concatenate(\n [cql_q1_rand, jnp.expand_dims(q1_pred, 1), cql_q1_next_actions, cql_q1_current_actions], axis=1\n )\n cql_cat_q2 = jnp.concatenate(\n [cql_q2_rand, jnp.expand_dims(q2_pred, 1), cql_q2_next_actions, cql_q2_current_actions], axis=1\n )\n cql_std_q1 = jnp.std(cql_cat_q1, axis=1)\n cql_std_q2 = jnp.std(cql_cat_q2, axis=1)\n\n if self.config.cql_importance_sample:\n random_density = np.log(0.5 ** self.action_dim)\n cql_cat_q1 = jnp.concatenate(\n [cql_q1_rand - random_density,\n cql_q1_next_actions - cql_next_log_pis,\n cql_q1_current_actions - cql_current_log_pis],\n axis=1\n )\n cql_cat_q2 = jnp.concatenate(\n [cql_q2_rand - random_density,\n cql_q2_next_actions - cql_next_log_pis,\n cql_q2_current_actions - cql_current_log_pis],\n axis=1\n )\n\n cql_qf1_ood = (\n jax.scipy.special.logsumexp(cql_cat_q1 / self.config.cql_temp, axis=1)\n * self.config.cql_temp\n )\n cql_qf2_ood = (\n jax.scipy.special.logsumexp(cql_cat_q2 / self.config.cql_temp, axis=1)\n * self.config.cql_temp\n )\n\n \"\"\"Subtract the log likelihood of data\"\"\"\n cql_qf1_diff = jnp.clip(\n cql_qf1_ood - q1_pred,\n self.config.cql_clip_diff_min,\n self.config.cql_clip_diff_max,\n ).mean()\n cql_qf2_diff = jnp.clip(\n cql_qf2_ood - q2_pred,\n self.config.cql_clip_diff_min,\n self.config.cql_clip_diff_max,\n ).mean()\n\n if self.config.cql_lagrange:\n alpha_prime = jnp.clip(\n jnp.exp(self.log_alpha_prime.apply(train_params['log_alpha_prime'])),\n a_min=0.0, a_max=1000000.0\n )\n cql_min_qf1_loss = alpha_prime * self.config.cql_min_q_weight * (cql_qf1_diff - self.config.cql_target_action_gap)\n cql_min_qf2_loss = alpha_prime * self.config.cql_min_q_weight * (cql_qf2_diff - self.config.cql_target_action_gap)\n\n alpha_prime_loss = (-cql_min_qf1_loss - cql_min_qf2_loss)*0.5\n\n loss_collection['log_alpha_prime'] = alpha_prime_loss\n\n else:\n cql_min_qf1_loss = cql_qf1_diff * self.config.cql_min_q_weight\n cql_min_qf2_loss = cql_qf2_diff * self.config.cql_min_q_weight\n alpha_prime_loss = 0.0\n alpha_prime = 0.0\n\n qf1_loss = qf1_loss + cql_min_qf1_loss\n qf2_loss = qf2_loss + cql_min_qf2_loss\n\n loss_collection['qf1'] = qf1_loss\n loss_collection['qf2'] = qf2_loss\n return tuple(loss_collection[key] for key in self.model_keys), locals()\n\n train_params = {key: train_states[key].params for key in self.model_keys}\n (_, aux_values), grads = 
value_and_multi_grad(loss_fn, len(self.model_keys), has_aux=True)(train_params)\n\n new_train_states = {\n key: train_states[key].apply_gradients(grads=grads[i][key])\n for i, key in enumerate(self.model_keys)\n }\n new_target_qf_params = {}\n new_target_qf_params['qf1'] = update_target_network(\n new_train_states['qf1'].params, target_qf_params['qf1'],\n self.config.soft_target_update_rate\n )\n new_target_qf_params['qf2'] = update_target_network(\n new_train_states['qf2'].params, target_qf_params['qf2'],\n self.config.soft_target_update_rate\n )\n\n metrics = collect_jax_metrics(\n aux_values,\n ['log_pi', 'policy_loss', 'qf1_loss', 'qf2_loss', 'alpha_loss',\n 'alpha', 'q1_pred', 'q2_pred', 'target_q_values']\n )\n\n if self.config.use_cql:\n metrics.update(collect_jax_metrics(\n aux_values,\n ['cql_std_q1', 'cql_std_q2', 'cql_q1_rand', 'cql_q2_rand'\n 'cql_qf1_diff', 'cql_qf2_diff', 'cql_min_qf1_loss',\n 'cql_min_qf2_loss', 'cql_q1_current_actions', 'cql_q2_current_actions'\n 'cql_q1_next_actions', 'cql_q2_next_actions', 'alpha_prime',\n 'alpha_prime_loss'],\n 'cql'\n ))\n\n return new_train_states, new_target_qf_params, metrics\n\n @property\n def model_keys(self):\n return self._model_keys\n\n @property\n def train_states(self):\n return self._train_states\n\n @property\n def train_params(self):\n return {key: self.train_states[key].params for key in self.model_keys}\n\n @property\n def total_steps(self):\n return self._total_steps" }, { "identifier": "get_d4rl_dataset", "path": "vqn/replay_buffer.py", "snippet": "def get_d4rl_dataset(env):\n dataset = d4rl.qlearning_dataset(env)\n return dict(\n observations=dataset['observations'],\n actions=dataset['actions'],\n next_observations=dataset['next_observations'],\n rewards=dataset['rewards'],\n dones=dataset['terminals'].astype(np.float32),\n )" }, { "identifier": "subsample_batch", "path": "vqn/replay_buffer.py", "snippet": "def subsample_batch(batch, size):\n indices = np.random.randint(batch['observations'].shape[0], size=size)\n return index_batch(batch, indices)" }, { "identifier": "batch_to_jax", "path": "vqn/jax_utils.py", "snippet": "@jax.jit\ndef batch_to_jax(batch):\n return jax.tree_util.tree_map(jax.device_put, batch)" }, { "identifier": "TanhGaussianPolicy", "path": "vqn/model.py", "snippet": "class TanhGaussianPolicy(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n log_std_multiplier: float = 1.0\n log_std_offset: float = -1.0\n use_tanh: bool = True\n\n def setup(self):\n self.base_network = FullyConnectedNetwork(\n output_dim=2 * self.action_dim, arch=self.arch, orthogonal_init=self.orthogonal_init\n )\n self.log_std_multiplier_module = Scalar(self.log_std_multiplier)\n self.log_std_offset_module = Scalar(self.log_std_offset)\n\n def log_prob(self, observations, actions):\n if actions.ndim == 3:\n observations = extend_and_repeat(observations, 1, actions.shape[1])\n base_network_output = self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.MultivariateNormalDiag(mean, jnp.exp(log_std))\n if self.use_tanh:\n action_distribution = distrax.Transformed(\n action_distribution, distrax.Block(distrax.Tanh(), ndims=1)\n )\n return action_distribution.log_prob(actions)\n\n def __call__(self, observations, deterministic=False, repeat=None):\n if repeat is not None:\n 
observations = extend_and_repeat(observations, 1, repeat)\n base_network_output = self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.MultivariateNormalDiag(mean, jnp.exp(log_std))\n if self.use_tanh:\n action_distribution = distrax.Transformed(\n action_distribution, distrax.Block(distrax.Tanh(), ndims=1)\n )\n if deterministic:\n samples = mean\n if self.use_tanh:\n samples = jnp.tanh(samples)\n log_prob = action_distribution.log_prob(samples)\n else:\n samples, log_prob = action_distribution.sample_and_log_prob(seed=self.make_rng('noise'))\n\n return samples, log_prob\n\n @nn.nowrap\n def rng_keys(self):\n return ('params', 'noise')" }, { "identifier": "FullyConnectedQFunction", "path": "vqn/model.py", "snippet": "class FullyConnectedQFunction(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n\n @nn.compact\n @multiple_action_q_function\n def __call__(self, observations, actions):\n x = jnp.concatenate([observations, actions], axis=-1)\n x = FullyConnectedNetwork(output_dim=1, arch=self.arch, orthogonal_init=self.orthogonal_init)(x)\n return jnp.squeeze(x, -1)\n\n @nn.nowrap\n def rng_keys(self):\n return ('params', )" }, { "identifier": "SamplerPolicy", "path": "vqn/model.py", "snippet": "class SamplerPolicy(object):\n\n def __init__(self, policy, params):\n self.policy = policy\n self.params = params\n\n def update_params(self, params):\n self.params = params\n return self\n\n @partial(jax.jit, static_argnames=('self', 'deterministic'))\n def act(self, params, rng, observations, deterministic):\n return self.policy.apply(\n params, observations, deterministic, repeat=None,\n rngs=JaxRNG(rng)(self.policy.rng_keys())\n )\n\n def __call__(self, observations, deterministic=False):\n actions, _ = self.act(self.params, next_rng(), observations, deterministic=deterministic)\n assert jnp.all(jnp.isfinite(actions))\n return jax.device_get(actions)" }, { "identifier": "StepSampler", "path": "vqn/sampler.py", "snippet": "class StepSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n def sample(self, policy, n_steps, deterministic=False, replay_buffer=None):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n for _ in range(n_steps):\n self._traj_steps += 1\n observation = self._current_observation\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n self._current_observation = next_observation\n\n if done or self._traj_steps >= self.max_traj_length:\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n return dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n 
)\n\n @property\n def env(self):\n return self._env" }, { "identifier": "TrajSampler", "path": "vqn/sampler.py", "snippet": "class TrajSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n\n def sample(self, policy, n_trajs, replay_buffer=None, deterministic=False):\n trajs = []\n for _ in range(n_trajs):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n observation = self.env.reset()\n\n for _ in range(self.max_traj_length):\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n observation = next_observation\n\n if done:\n break\n\n trajs.append(dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n ))\n\n return trajs\n\n @property\n def env(self):\n return self._env" }, { "identifier": "SequenceDataset", "path": "vqn/robomimic_utils.py", "snippet": "OBS_KEYS = (\"robot0_eef_pos\", \"robot0_eef_quat\", \"robot0_gripper_qpos\", \"object\")\nENV_TO_HORIZON_MAP = {'lift': 400,\n 'can': 400,\n 'square': 400,\n 'transport': 700,\n 'tool_hang': 700}\ndef make_dataset(dataset, env_name):\ndef process_robomimic_dataset(seq_dataset):\ndef get_robomimic_env(dataset_path, example_action, env_name):\n def __init__(self, env, horizon, example_action):\n def step(self, action):\n def reset(self):\n def render(self):\n def get_normalized_score(self, rewards):\n def _process_obs(self, obs):\ndef _check_lengths(dataset_dict: DatasetDict,\n dataset_len: Optional[int] = None) -> int:\ndef _subselect(dataset_dict: DatasetDict, index: np.ndarray) -> DatasetDict:\ndef _sample(dataset_dict: Union[np.ndarray, DatasetDict],\n indx: np.ndarray) -> DatasetDict:\n def __init__(self, dataset_dict: DatasetDict, seed: Optional[int] = None):\n def np_random(self) -> np.random.RandomState:\n def seed(self, seed: Optional[int] = None) -> list:\n def __len__(self) -> int:\n def sample(self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None) -> frozen_dict.FrozenDict:\n def split(self, ratio: float) -> Tuple['Dataset', 'Dataset']:\n def _trajectory_boundaries_and_returns(self) -> Tuple[list, list, list]:\n def filter(self,\n percentile: Optional[float] = None,\n threshold: Optional[float] = None):\n def normalize_returns(self, scaling: float = 1000):\n def __init__(self,\n dataset_dict: dict,\n clip_to_eps: bool = True,\n eps: float = 1e-5):\n def __init__(self,\n env: gym.Env,\n clip_to_eps: bool = True,\n eps: float = 1e-5,\n ignore_done: bool = False,\n custom_dataset: dict = None):\nclass RobosuiteGymWrapper():\nclass Dataset(object):\nclass OfflineDataset(Dataset):\nclass D4RLDataset(Dataset):" }, { "identifier": "Timer", "path": "vqn/utils.py", "snippet": "class Timer(object):\n\n def __init__(self):\n self._time = None\n\n def __enter__(self):\n self._start_time = time.time()\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self._time = time.time() - 
self._start_time\n\n def __call__(self):\n return self._time" }, { "identifier": "define_flags_with_default", "path": "vqn/utils.py", "snippet": "def define_flags_with_default(**kwargs):\n for key, val in kwargs.items():\n if isinstance(val, ConfigDict):\n config_flags.DEFINE_config_dict(key, val)\n elif isinstance(val, bool):\n # Note that True and False are instances of int.\n absl.flags.DEFINE_bool(key, val, 'automatically defined flag')\n elif isinstance(val, int):\n absl.flags.DEFINE_integer(key, val, 'automatically defined flag')\n elif isinstance(val, float):\n absl.flags.DEFINE_float(key, val, 'automatically defined flag')\n elif isinstance(val, str):\n absl.flags.DEFINE_string(key, val, 'automatically defined flag')\n else:\n raise ValueError('Incorrect value type')\n return kwargs" }, { "identifier": "set_random_seed", "path": "vqn/utils.py", "snippet": "def set_random_seed(seed):\n np.random.seed(seed)\n random.seed(seed)\n init_rng(seed)" }, { "identifier": "print_flags", "path": "vqn/utils.py", "snippet": "def print_flags(flags, flags_def):\n logging.info(\n 'Running training with hyperparameters: \\n{}'.format(\n pprint.pformat(\n ['{}: {}'.format(key, val) for key, val in get_user_flags(flags, flags_def).items()]\n )\n )\n )" }, { "identifier": "get_user_flags", "path": "vqn/utils.py", "snippet": "def get_user_flags(flags, flags_def):\n output = {}\n for key in flags_def:\n val = getattr(flags, key)\n if isinstance(val, ConfigDict):\n output.update(flatten_config_dict(val, prefix=key))\n else:\n output[key] = val\n\n return output" }, { "identifier": "prefix_metrics", "path": "vqn/utils.py", "snippet": "def prefix_metrics(metrics, prefix):\n return {\n '{}/{}'.format(prefix, key): value for key, value in metrics.items()\n }" }, { "identifier": "WandBLogger", "path": "vqn/utils.py", "snippet": "class WandBLogger(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.online = False\n config.prefix = 'JaxCQL'\n config.project = ''\n config.output_dir = '/tmp/JaxCQL'\n config.random_delay = 0.0\n config.experiment_id = config_dict.placeholder(str)\n config.anonymous = config_dict.placeholder(str)\n config.notes = config_dict.placeholder(str)\n config.entity = config_dict.placeholder(str)\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, variant):\n self.config = self.get_default_config(config)\n\n if self.config.experiment_id is None:\n self.config.experiment_id = uuid.uuid4().hex\n\n if self.config.prefix != '':\n self.config.project = '{}--{}'.format(self.config.prefix, self.config.project)\n\n if self.config.output_dir == '':\n self.config.output_dir = tempfile.mkdtemp()\n else:\n self.config.output_dir = os.path.join(self.config.output_dir, self.config.experiment_id)\n os.makedirs(self.config.output_dir, exist_ok=True)\n\n self._variant = copy(variant)\n\n if 'hostname' not in self._variant:\n self._variant['hostname'] = gethostname()\n\n if self.config.random_delay > 0:\n time.sleep(np.random.uniform(0, self.config.random_delay))\n\n self.run = wandb.init(\n reinit=True,\n config=self._variant,\n project=self.config.project,\n dir=self.config.output_dir,\n entity=config.entity,\n id=self.config.experiment_id,\n anonymous=self.config.anonymous,\n notes=self.config.notes,\n settings=wandb.Settings(\n start_method=\"thread\",\n _disable_stats=True,\n ),\n mode='online' if self.config.online else 'offline',\n )\n\n def log(self, *args, 
**kwargs):\n self.run.log(*args, **kwargs)\n\n def save_pickle(self, obj, filename):\n with open(os.path.join(self.config.output_dir, filename), 'wb') as fout:\n pickle.dump(obj, fout)\n\n @property\n def experiment_id(self):\n return self.config.experiment_id\n\n @property\n def variant(self):\n return self.config.variant\n\n @property\n def output_dir(self):\n return self.config.output_dir" } ]
import os
import time
import uuid
import numpy as np
import pprint
import jax
import jax.numpy as jnp
import flax
import gym
import d4rl
import absl.app
import absl.flags
from copy import deepcopy
from .vqn import VQN
from .conservative_sac import ConservativeSAC
from .replay_buffer import get_d4rl_dataset, subsample_batch
from .jax_utils import batch_to_jax
from .model import TanhGaussianPolicy, FullyConnectedQFunction, SamplerPolicy
from .sampler import StepSampler, TrajSampler
from .robomimic_utils import (
    SequenceDataset, make_dataset, process_robomimic_dataset, D4RLDataset,
    get_robomimic_env, ENV_TO_HORIZON_MAP, OBS_KEYS
)
from .utils import (
    Timer, define_flags_with_default, set_random_seed, print_flags,
    get_user_flags, prefix_metrics, WandBLogger
)
from viskit.logging import logger, setup_logger
11,006
FLAGS_DEF = define_flags_with_default( env='halfcheetah-medium-v2', algorithm='cql', max_traj_length=200, seed=42, save_model=False, batch_size=256, reward_scale=1.0, reward_bias=0.0, clip_action=0.999, policy_arch='256-256', qf_arch='256-256', orthogonal_init=False, policy_log_std_multiplier=1.0, policy_log_std_offset=-1.0, n_epochs=1000, bc_epochs=1000, n_train_step_per_epoch=1000, eval_period=10, eval_n_trajs=5, cql=ConservativeSAC.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant) setup_logger( variant=variant, exp_id=wandb_logger.experiment_id, seed=FLAGS.seed, base_log_dir=FLAGS.logging.output_dir, include_exp_prefix_sub_dir=False ) set_random_seed(FLAGS.seed) if FLAGS.env in ENV_TO_HORIZON_MAP: dataset_path = f'./robomimic/datasets/{FLAGS.env}/low_dim_v141.hdf5' seq_dataset = SequenceDataset(hdf5_path=dataset_path, obs_keys=OBS_KEYS, dataset_keys=("actions", "rewards", "dones"), hdf5_cache_mode="all", load_next_obs=True) dataset = process_robomimic_dataset(seq_dataset) dataset = D4RLDataset(env=None, custom_dataset=dataset) example_ob = dataset.dataset_dict['observations'][0][np.newaxis] example_action = dataset.dataset_dict['actions'][0][np.newaxis] env = get_robomimic_env(dataset_path, example_action, FLAGS.env) max_len = ENV_TO_HORIZON_MAP[FLAGS.env] else: env = gym.make(FLAGS.env).unwrapped dataset = get_d4rl_dataset(env) dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action) max_len = FLAGS.max_traj_length example_ob = env.observation_space.sample()[np.newaxis] example_action = env.action_space.sample()[np.newaxis] eval_sampler = TrajSampler(env, max_len) observation_dim = example_ob.shape[1] action_dim = example_action.shape[1] dataset = make_dataset(dataset, FLAGS.env) policy = TanhGaussianPolicy( observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init, FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset ) qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init) if FLAGS.cql.target_entropy >= 0.0: FLAGS.cql.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item() sac = ConservativeSAC(FLAGS.cql, policy, qf) sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy']) viskit_metrics = {} for epoch in range(FLAGS.n_epochs): metrics = {'epoch': epoch}
FLAGS_DEF = define_flags_with_default( env='halfcheetah-medium-v2', algorithm='cql', max_traj_length=200, seed=42, save_model=False, batch_size=256, reward_scale=1.0, reward_bias=0.0, clip_action=0.999, policy_arch='256-256', qf_arch='256-256', orthogonal_init=False, policy_log_std_multiplier=1.0, policy_log_std_offset=-1.0, n_epochs=1000, bc_epochs=1000, n_train_step_per_epoch=1000, eval_period=10, eval_n_trajs=5, cql=ConservativeSAC.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant) setup_logger( variant=variant, exp_id=wandb_logger.experiment_id, seed=FLAGS.seed, base_log_dir=FLAGS.logging.output_dir, include_exp_prefix_sub_dir=False ) set_random_seed(FLAGS.seed) if FLAGS.env in ENV_TO_HORIZON_MAP: dataset_path = f'./robomimic/datasets/{FLAGS.env}/low_dim_v141.hdf5' seq_dataset = SequenceDataset(hdf5_path=dataset_path, obs_keys=OBS_KEYS, dataset_keys=("actions", "rewards", "dones"), hdf5_cache_mode="all", load_next_obs=True) dataset = process_robomimic_dataset(seq_dataset) dataset = D4RLDataset(env=None, custom_dataset=dataset) example_ob = dataset.dataset_dict['observations'][0][np.newaxis] example_action = dataset.dataset_dict['actions'][0][np.newaxis] env = get_robomimic_env(dataset_path, example_action, FLAGS.env) max_len = ENV_TO_HORIZON_MAP[FLAGS.env] else: env = gym.make(FLAGS.env).unwrapped dataset = get_d4rl_dataset(env) dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action) max_len = FLAGS.max_traj_length example_ob = env.observation_space.sample()[np.newaxis] example_action = env.action_space.sample()[np.newaxis] eval_sampler = TrajSampler(env, max_len) observation_dim = example_ob.shape[1] action_dim = example_action.shape[1] dataset = make_dataset(dataset, FLAGS.env) policy = TanhGaussianPolicy( observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init, FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset ) qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init) if FLAGS.cql.target_entropy >= 0.0: FLAGS.cql.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item() sac = ConservativeSAC(FLAGS.cql, policy, qf) sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy']) viskit_metrics = {} for epoch in range(FLAGS.n_epochs): metrics = {'epoch': epoch}
with Timer() as train_timer:
11
2023-10-18 06:31:20+00:00
16k
SLDGroup/G-CASCADE
lib/networks.py
[ { "identifier": "pvt_v2_b2", "path": "lib/pvtv2.py", "snippet": "class pvt_v2_b2(PyramidVisionTransformerImpr):\n def __init__(self, **kwargs):\n super(pvt_v2_b2, self).__init__(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],\n qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],\n drop_rate=0.0, drop_path_rate=0.1)" }, { "identifier": "pvt_v2_b5", "path": "lib/pvtv2.py", "snippet": "class pvt_v2_b5(PyramidVisionTransformerImpr):\n def __init__(self, **kwargs):\n super(pvt_v2_b5, self).__init__(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],\n qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1],\n drop_rate=0.0, drop_path_rate=0.1)" }, { "identifier": "pvt_v2_b0", "path": "lib/pvtv2.py", "snippet": "class pvt_v2_b0(PyramidVisionTransformerImpr):\n def __init__(self, **kwargs):\n super(pvt_v2_b0, self).__init__(\n patch_size=4, embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],\n qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],\n drop_rate=0.0, drop_path_rate=0.1)" }, { "identifier": "CUP", "path": "lib/decoders.py", "snippet": "class CUP(nn.Module):\n def __init__(self, channels=[512,320,128,64]):\n super(CUP,self).__init__()\n \n self.ConvBlock4 = conv_block(ch_in=channels[0], ch_out=channels[0])\n\n self.Up3 = up_conv(ch_in=channels[0],ch_out=channels[1])\n self.ConvBlock3 = conv_block(ch_in=2*channels[1], ch_out=channels[1])\n\n self.Up2 = up_conv(ch_in=channels[1],ch_out=channels[2])\n self.ConvBlock2 = conv_block(ch_in=2*channels[2], ch_out=channels[2])\n \n self.Up1 = up_conv(ch_in=channels[2],ch_out=channels[3])\n self.ConvBlock1 = conv_block(ch_in=2*channels[3], ch_out=channels[3])\n\n def forward(self,x, skips):\n\n d4 = self.ConvBlock4(x)\n \n # decoding + concat path\n d3 = self.Up3(d4)\n d3 = torch.cat((skips[0],d3),dim=1)\n \n d3 = self.ConvBlock3(d3)\n \n d2 = self.Up2(d3)\n d2 = torch.cat((skips[1],d2),dim=1)\n d2 = self.ConvBlock2(d2)\n\n d1 = self.Up1(d2)\n d1 = torch.cat((skips[2],d1),dim=1)\n d1 = self.ConvBlock1(d1)\n return d4, d3, d2, d1 " }, { "identifier": "CASCADE", "path": "lib/decoders.py", "snippet": "class CASCADE(nn.Module):\n def __init__(self, channels=[512,320,128,64]):\n super(CASCADE,self).__init__()\n \n self.Conv_1x1 = nn.Conv2d(channels[0],channels[0],kernel_size=1,stride=1,padding=0)\n self.ConvBlock4 = conv_block(ch_in=channels[0], ch_out=channels[0])\n\t\n self.Up3 = up_conv(ch_in=channels[0],ch_out=channels[1])\n self.AG3 = Attention_block(F_g=channels[1],F_l=channels[1],F_int=channels[2])\n self.ConvBlock3 = conv_block(ch_in=channels[1], ch_out=channels[1])\n\n self.Up2 = up_conv(ch_in=channels[1],ch_out=channels[2])\n self.AG2 = Attention_block(F_g=channels[2],F_l=channels[2],F_int=channels[3])\n self.ConvBlock2 = conv_block(ch_in=channels[2], ch_out=channels[2])\n \n self.Up1 = up_conv(ch_in=channels[2],ch_out=channels[3])\n self.AG1 = Attention_block(F_g=channels[3],F_l=channels[3],F_int=int(channels[3]/2))\n self.ConvBlock1 = conv_block(ch_in=channels[3], ch_out=channels[3])\n \n self.CA4 = ChannelAttention(channels[0])\n self.CA3 = ChannelAttention(channels[1])\n self.CA2 = ChannelAttention(channels[2])\n self.CA1 = ChannelAttention(channels[3])\n \n self.SA = SPA()\n \n def forward(self,x, skips):\n \n d4 = self.Conv_1x1(x)\n \n # CAM4\n d4 = 
self.CA4(d4)*d4\n d4 = self.SA(d4)*d4 \n d4 = self.ConvBlock4(d4)\n \n # upconv3\n d3 = self.Up3(d4)\n \n # AG3\n x3 = self.AG3(g=d3,x=skips[0])\n \n # Concat 3\n d3 = d3 + x3\n \n # CAM3\n d3 = self.CA3(d3)*d3\n d3 = self.SA(d3)*d3 \n d3 = self.ConvBlock3(d3)\n \n # upconv2\n d2 = self.Up2(d3)\n \n # AG2\n x2 = self.AG2(g=d2,x=skips[1])\n \n # Concat 2\n d2 = d2 + x2\n \n # CAM2\n d2 = self.CA2(d2)*d2\n d2 = self.SA(d2)*d2\n #print(d2.shape)\n d2 = self.ConvBlock2(d2)\n \n # upconv1\n d1 = self.Up1(d2)\n \n #print(skips[2])\n # AG1\n x1 = self.AG1(g=d1,x=skips[2])\n \n # Concat 1\n d1 = d1 + x1\n \n # CAM1\n d1 = self.CA1(d1)*d1\n d1 = self.SA(d1)*d1\n d1 = self.ConvBlock1(d1)\n return d4, d3, d2, d1" }, { "identifier": "CASCADE_Cat", "path": "lib/decoders.py", "snippet": "class CASCADE_Cat(nn.Module):\n def __init__(self, channels=[512,320,128,64]):\n super(CASCADE_Cat,self).__init__()\n \n self.Conv_1x1 = nn.Conv2d(channels[0],channels[0],kernel_size=1,stride=1,padding=0)\n self.ConvBlock4 = conv_block(ch_in=channels[0], ch_out=channels[0])\n\t\n self.Up3 = up_conv(ch_in=channels[0],ch_out=channels[1])\n self.AG3 = Attention_block(F_g=channels[1],F_l=channels[1],F_int=channels[2])\n self.ConvBlock3 = conv_block(ch_in=2*channels[1], ch_out=channels[1])\n\n self.Up2 = up_conv(ch_in=channels[1],ch_out=channels[2])\n self.AG2 = Attention_block(F_g=channels[2],F_l=channels[2],F_int=channels[3])\n self.ConvBlock2 = conv_block(ch_in=2*channels[2], ch_out=channels[2])\n \n self.Up1 = up_conv(ch_in=channels[2],ch_out=channels[3])\n self.AG1 = Attention_block(F_g=channels[3],F_l=channels[3],F_int=int(channels[3]/2))\n self.ConvBlock1 = conv_block(ch_in=2*channels[3], ch_out=channels[3])\n \n self.CA4 = ChannelAttention(channels[0])\n self.CA3 = ChannelAttention(2*channels[1])\n self.CA2 = ChannelAttention(2*channels[2])\n self.CA1 = ChannelAttention(2*channels[3])\n \n self.SA = SPA()\n \n def forward(self,x, skips):\n \n d4 = self.Conv_1x1(x)\n \n # CAM4\n d4 = self.CA4(d4)*d4\n d4 = self.SA(d4)*d4 \n d4 = self.ConvBlock4(d4)\n \n # upconv3\n d3 = self.Up3(d4)\n \n # AG3\n x3 = self.AG3(g=d3,x=skips[0])\n \n # Concat 3\n d3 = torch.cat((x3,d3),dim=1)\n \n # CAM3\n d3 = self.CA3(d3)*d3\n d3 = self.SA(d3)*d3 \n d3 = self.ConvBlock3(d3)\n \n # upconv2\n d2 = self.Up2(d3)\n \n # AG2\n x2 = self.AG2(g=d2,x=skips[1])\n \n # Concat 2\n d2 = torch.cat((x2,d2),dim=1)\n \n # CAM2\n d2 = self.CA2(d2)*d2\n d2 = self.SA(d2)*d2\n #print(d2.shape)\n d2 = self.ConvBlock2(d2)\n \n # upconv1\n d1 = self.Up1(d2)\n \n #print(skips[2])\n # AG1\n x1 = self.AG1(g=d1,x=skips[2])\n \n # Concat 1\n d1 = torch.cat((x1,d1),dim=1)\n \n # CAM1\n d1 = self.CA1(d1)*d1\n d1 = self.SA(d1)*d1\n d1 = self.ConvBlock1(d1)\n return d4, d3, d2, d1 " }, { "identifier": "GCUP", "path": "lib/decoders.py", "snippet": "class GCUP(nn.Module):\n def __init__(self, channels=[512,320,128,64], img_size=224, drop_path_rate=0.0, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu'):\n super(GCUP,self).__init__()\n \n # Up-convolution block (UCB) parameters\n self.ucb_ks = 3\n self.ucb_pad = 1\n self.ucb_stride = 1\n self.activation = activation\n \n # Graph convolution block (GCB) parameters\n self.padding=padding\n self.k = k # neighbor num (default:9)\n self.conv = conv # graph conv layer {edge, mr, sage, gin} # default mr\n self.gcb_act = gcb_act # activation layer for graph convolution block {relu, prelu, leakyrelu, gelu, hswish}\n self.gcb_norm = 'batch' # batch or instance normalization for graph convolution block {batch, 
instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.reduce_ratios = [1,1,4, 2]\n self.dpr = [self.drop_path,self.drop_path,self.drop_path,self.drop_path] # stochastic depth decay rule \n self.num_knn = [self.k,self.k,self.k,self.k] # number of knn's k\n self.max_dilation = 18 // max(self.num_knn)\n self.HW = img_size // 4 * img_size // 4\n \n self.gcb4 = nn.Sequential(GCB(channels[0], self.num_knn[0], min(0 // 4 + 1, self.max_dilation), self.conv, self.act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[0], n=self.HW//(4*4*4), drop_path=self.dpr[0],\n relative_pos=True, padding=self.padding),\n )\n\t\n self.ucb3 = UCB(ch_in=channels[0],ch_out=channels[1], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[0], activation=self.activation)\n self.gcb3 = nn.Sequential(GCB(channels[1], self.num_knn[1], min(3 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[1], n=self.HW//(4*4), drop_path=self.dpr[1],\n relative_pos=True, padding=self.padding),\n )\n\n self.ucb2 = UCB(ch_in=channels[1],ch_out=channels[2], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[1], activation=self.activation)\n self.gcb2 = nn.Sequential(GCB(channels[2], self.num_knn[2], min(8 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[2], n=self.HW//(4), drop_path=self.dpr[2],\n relative_pos=True, padding=self.padding),\n )\n \n self.ucb1 = UCB(ch_in=channels[2],ch_out=channels[3], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[2], activation=self.activation)\n self.gcb1 = nn.Sequential(GCB(channels[3], self.num_knn[3], min(11 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[3], n=self.HW, drop_path=self.dpr[3],\n relative_pos=True, padding=self.padding),\n )\n \n def forward(self,x, skips):\n \n # GCAM4\n d4 = self.gcb4(x) \n \n # UCB3\n d3 = self.ucb3(d4)\n \n # Aggregation 3\n d3 = d3 + skips[0]\n \n # GCAM3\n d3 = self.gcb3(d3) \n \n # UCB2\n d2 = self.ucb2(d3) \n \n # Aggregation 2\n d2 = d2 + skips[1] \n \n # GCAM2\n d2 = self.gcb2(d2)\n \n # UCB1\n d1 = self.ucb1(d2)\n \n # Aggregation 1\n d1 = d1 + skips[2]\n \n # GCAM1\n d1 = self.gcb1(d1)\n \n return d4, d3, d2, d1" }, { "identifier": "GCUP_Cat", "path": "lib/decoders.py", "snippet": "class GCUP_Cat(nn.Module):\n def __init__(self, channels=[512,320,128,64], img_size=224, drop_path_rate=0.0, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu'):\n super(GCUP_Cat,self).__init__()\n \n # Up-convolution block (UCB) parameters\n self.ucb_ks = 3\n self.ucb_pad = 1\n self.ucb_stride = 1\n self.activation = activation\n \n # Graph convolution block (GCB) parameters\n self.padding=padding\n self.k = k # neighbor num (default:9)\n self.conv = conv # graph conv layer {edge, mr, sage, gin} # default mr\n self.gcb_act = gcb_act # activation layer for graph convolution block {relu, prelu, leakyrelu, gelu, hswish}\n self.gcb_norm = 'batch' # batch or instance normalization for graph convolution block {batch, 
instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.reduce_ratios = [1,1,4, 2]\n self.dpr = [self.drop_path,self.drop_path,self.drop_path,self.drop_path] # stochastic depth decay rule \n self.num_knn = [self.k,self.k,self.k,self.k] # number of knn's k\n self.max_dilation = 18 // max(self.num_knn)\n self.HW = img_size // 4 * img_size // 4\n \n self.gcb4 = nn.Sequential(GCB(channels[0], self.num_knn[0], min(0 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[0], n=self.HW//(4*4*4), drop_path=self.dpr[0],\n relative_pos=True, padding=self.padding),\n )\n\t\n self.ucb3 = UCB(ch_in=channels[0],ch_out=channels[1], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[0], activation=self.activation)\n self.gcb3 = nn.Sequential(GCB(2*channels[1], self.num_knn[1], min(3 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[1], n=self.HW//(4*4), drop_path=self.dpr[1],\n relative_pos=True, padding=self.padding),\n )\n\n self.ucb2 = UCB(ch_in=2*channels[1],ch_out=channels[2], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[1], activation=self.activation)\n self.gcb2 = nn.Sequential(GCB(2*channels[2], self.num_knn[2], min(8 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[2], n=self.HW//(4), drop_path=self.dpr[2],\n relative_pos=True, padding=self.padding),\n )\n \n self.ucb1 = UCB(ch_in=2*channels[2],ch_out=channels[3], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[2], activation=self.activation)\n self.gcb1 = nn.Sequential(GCB(2*channels[3], self.num_knn[3], min(11 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[3], n=self.HW, drop_path=self.dpr[3],\n relative_pos=True, padding=self.padding),\n ) \n \n def forward(self,x, skips):\n \n # GCAM4\n d4 = self.gcb4(x) \n \n # UCB3\n d3 = self.ucb3(d4)\n\n # Aggregation 3\n d3 = torch.cat((skips[0],d3),dim=1)\n \n # GCAM3\n d3 = self.gcb3(d3)\n\n # UCB2\n d2 = self.ucb2(d3)\n\n # Aggregation 2\n d2 = torch.cat((skips[1],d2),dim=1)\n \n # GCAM2\n d2 = self.gcb2(d2)\n \n # UCB1\n d1 = self.ucb1(d2)\n \n # Aggregation 1\n d1 = torch.cat((skips[2],d1),dim=1)\n \n # GCAM1\n d1 = self.gcb1(d1)\n\n return d4, d3, d2, d1" }, { "identifier": "GCASCADE", "path": "lib/decoders.py", "snippet": "class GCASCADE(nn.Module):\n def __init__(self, channels=[512,320,128,64], drop_path_rate=0.0, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu'):\n super(GCASCADE,self).__init__()\n\n # Up-convolution block (UCB) parameters\n self.ucb_ks = 3\n self.ucb_pad = 1\n self.ucb_stride = 1\n self.activation = activation\n \n # Graph convolution block (GCB) parameters\n self.padding=padding\n self.k = k # neighbor num (default:9)\n self.conv = conv # graph conv layer {edge, mr, sage, gin} # default mr\n self.gcb_act = gcb_act # activation layer for graph convolution block {relu, prelu, leakyrelu, gelu, hswish}\n self.gcb_norm = 'batch' # batch or instance 
normalization for graph convolution block {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.reduce_ratios = [1,1,4,2]\n self.dpr = [self.drop_path,self.drop_path,self.drop_path,self.drop_path] # stochastic depth decay rule \n self.num_knn = [self.k,self.k,self.k,self.k] # number of knn's k\n self.max_dilation = 18 // max(self.num_knn)\n self.HW = img_size // 4 * img_size // 4\n \n self.gcb4 = nn.Sequential(GCB(channels[0], self.num_knn[0], min(0 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[0], n=self.HW//(4*4*4), drop_path=self.dpr[0],\n relative_pos=True, padding=self.padding),\n )\n\t\n self.ucb3 = UCB(ch_in=channels[0], ch_out=channels[1], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[0], activation=self.activation)\n self.gcb3 = nn.Sequential(GCB(channels[1], self.num_knn[1], min(3 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[1], n=self.HW//(4*4), drop_path=self.dpr[1],\n relative_pos=True, padding=self.padding),\n )\n\n self.ucb2 = UCB(ch_in=channels[1], ch_out=channels[2], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[1], activation=self.activation)\n self.gcb2 = nn.Sequential(GCB(channels[2], self.num_knn[2], min(8 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[2], n=self.HW//(4), drop_path=self.dpr[2],\n relative_pos=True, padding=self.padding),\n )\n \n self.ucb1 = UCB(ch_in=channels[2], ch_out=channels[3], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[2], activation=self.activation)\n self.gcb1 = nn.Sequential(GCB(channels[3], self.num_knn[3], min(11 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[3], n=self.HW, drop_path=self.dpr[3],\n relative_pos=True, padding=self.padding),\n )\n\n self.spa = SPA()\n\n \n def forward(self,x, skips):\n \n # GCAM4\n d4 = self.gcb4(x) \n d4 = self.spa(d4)*d4 \n \n # UCB3\n d3 = self.ucb3(d4)\n \n # Aggregation 3\n d3 = d3 + skips[0] #torch.cat((skips[0],d3),dim=1)\n \n # GCAM3\n d3 = self.gcb3(d3)\n d3 = self.spa(d3)*d3 \n \n # UCB2\n d2 = self.ucb2(d3)\n \n # Aggregation 2\n d2 = d2 + skips[1] #torch.cat((skips[1],d2),dim=1)\n \n # GCAM2\n d2 = self.gcb2(d2)\n d2 = self.spa(d2)*d2\n \n \n # UCB1\n d1 = self.ucb1(d2)\n \n # Aggregation 1\n d1 = d1 + skips[2] #torch.cat((skips[2],d1),dim=1)\n \n # GCAM1\n d1 = self.gcb1(d1)\n d1 = self.spa(d1)*d1\n \n return d4, d3, d2, d1" }, { "identifier": "GCASCADE_Cat", "path": "lib/decoders.py", "snippet": "class GCASCADE_Cat(nn.Module):\n def __init__(self, channels=[512,320,128,64], drop_path_rate=0.0, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu'):\n super(GCASCADE_Cat,self).__init__()\n\n # Up-convolution block (UCB) parameters\n self.ucb_ks = 3\n self.ucb_pad = 1\n self.ucb_stride = 1\n self.activation = activation\n \n # Graph convolution block (GCB) parameters\n self.padding=padding\n self.k = k # neighbor num (default:9)\n 
self.conv = conv # graph conv layer {edge, mr, sage, gin} # default mr\n self.gcb_act = gcb_act # activation layer for graph convolution block {relu, prelu, leakyrelu, gelu, hswish}\n self.gcb_norm = 'batch' # batch or instance normalization for graph convolution block {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.reduce_ratios = [1,1,4,2]\n self.dpr = [self.drop_path,self.drop_path,self.drop_path,self.drop_path] # stochastic depth decay rule \n self.num_knn = [self.k,self.k,self.k,self.k] # number of knn's k\n self.max_dilation = 18 // max(self.num_knn)\n self.HW = img_size // 4 * img_size // 4\n \n self.gcb4 = nn.Sequential(GCB(channels[0], self.num_knn[0], min(0 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[0], n=self.HW//(4*4*4), drop_path=self.dpr[0],\n relative_pos=True, padding=self.padding),\n )\n\t\n self.ucb3 = UCB(ch_in=channels[0], ch_out=channels[1], kernel_size=self.ucb_ks, stride = self.ucb_stride, padding = self.ucb_pad, groups = channels[0], activation=self.activation)\n self.gcb3 = nn.Sequential(GCB(channels[1]*2, self.num_knn[1], min(3 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[1], n=self.HW//(4*4), drop_path=self.dpr[1],\n relative_pos=True, padding=self.padding),\n )\n\n self.ucb2 = UCB(ch_in=channels[1]*2, ch_out=channels[2], kernel_size=self.ucb_ks, stride = self.ucb_stride, padding = self.ucb_pad, groups = channels[1], activation=self.activation)\n self.gcb2 = nn.Sequential(GCB(channels[2]*2, self.num_knn[2], min(8 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[2], n=self.HW//(4), drop_path=self.dpr[2],\n relative_pos=True, padding=self.padding),\n )\n \n self.ucb1 = UCB(ch_in=channels[2]*2, ch_out=channels[3], kernel_size=self.ucb_ks, stride = self.ucb_stride, padding = self.ucb_pad, groups = channels[2], activation=self.activation)\n self.gcb1 = nn.Sequential(GCB(channels[3]*2, self.num_knn[3], min(11 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[3], n=self.HW, drop_path=self.dpr[3],\n relative_pos=True, padding=self.padding),\n ) \n \n self.spa = SPA()\n\n \n def forward(self,x, skips): \n \n # GCAM4\n d4 = self.gcb4(x) \n d4 = self.spa(d4)*d4 \n \n # UCB3\n d3 = self.ucb3(d4)\n \n # Aggregation 3\n d3 = torch.cat((skips[0],d3),dim=1)\n \n # GCAM3\n d3 = self.gcb3(d3)\n d3 = self.spa(d3)*d3 \n \n # ucb2\n d2 = self.ucb2(d3)\n \n # Aggregation 2\n d2 = torch.cat((skips[1],d2),dim=1)\n \n # GCAM2\n d2 = self.gcb2(d2)\n d2 = self.spa(d2)*d2\n \n \n # ucb1\n d1 = self.ucb1(d2)\n \n # Aggregation 1\n d1 = torch.cat((skips[2],d1),dim=1)\n \n # GCAM1\n d1 = self.gcb1(d1)\n d1 = self.spa(d1)*d1\n \n return d4, d3, d2, d1" }, { "identifier": "pvig_ti_224_gelu", "path": "lib/pyramid_vig.py", "snippet": "@register_model\ndef pvig_ti_224_gelu(pretrained=False, **kwargs):\n class OptInit:\n def __init__(self, num_classes=1000, drop_path_rate=0.0, **kwargs):\n self.k = 9 # neighbor num (default:9)\n self.conv = 'mr' # graph conv layer {edge, mr}\n 
self.act = 'gelu' # activation layer {relu, prelu, leakyrelu, gelu, hswish}\n self.norm = 'batch' # batch or instance normalization {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.blocks = [2,2,6,2] # number of basic blocks in the backbone\n self.channels = [48, 96, 240, 384] # number of channels of deep features\n self.n_classes = num_classes # Dimension of out_channels\n self.emb_dims = 1024 # Dimension of embeddings\n\n opt = OptInit(**kwargs)\n model = DeepGCN(opt)\n model.default_cfg = default_cfgs['vig_224_gelu']\n return model" }, { "identifier": "pvig_s_224_gelu", "path": "lib/pyramid_vig.py", "snippet": "@register_model\ndef pvig_s_224_gelu(pretrained=False, **kwargs):\n class OptInit:\n def __init__(self, num_classes=1000, drop_path_rate=0.0, **kwargs):\n self.k = 9 # neighbor num (default:9)\n self.conv = 'mr' # graph conv layer {edge, mr}\n self.act = 'gelu' # activation layer {relu, prelu, leakyrelu, gelu, hswish}\n self.norm = 'batch' # batch or instance normalization {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.blocks = [2,2,6,2] # number of basic blocks in the backbone\n self.channels = [80, 160, 400, 640] # number of channels of deep features\n self.n_classes = num_classes # Dimension of out_channels\n self.emb_dims = 1024 # Dimension of embeddings\n\n opt = OptInit(**kwargs)\n model = DeepGCN(opt)\n model.default_cfg = default_cfgs['vig_224_gelu']\n return model" }, { "identifier": "pvig_m_224_gelu", "path": "lib/pyramid_vig.py", "snippet": "@register_model\ndef pvig_m_224_gelu(pretrained=False, **kwargs):\n class OptInit:\n def __init__(self, num_classes=1000, drop_path_rate=0.0, **kwargs):\n self.k = 9 # neighbor num (default:9)\n self.conv = 'mr' # graph conv layer {edge, mr}\n self.act = 'gelu' # activation layer {relu, prelu, leakyrelu, gelu, hswish}\n self.norm = 'batch' # batch or instance normalization {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.blocks = [2,2,16,2] # number of basic blocks in the backbone\n self.channels = [96, 192, 384, 768] # number of channels of deep features\n self.n_classes = num_classes # Dimension of out_channels\n self.emb_dims = 1024 # Dimension of embeddings\n\n opt = OptInit(**kwargs)\n model = DeepGCN(opt)\n model.default_cfg = default_cfgs['vig_224_gelu']\n return model" }, { "identifier": "pvig_b_224_gelu", "path": "lib/pyramid_vig.py", "snippet": "@register_model\ndef pvig_b_224_gelu(pretrained=False, **kwargs):\n class OptInit:\n def __init__(self, num_classes=1000, drop_path_rate=0.0, **kwargs):\n self.k = 9 # neighbor num (default:9)\n self.conv = 'mr' # graph conv layer {edge, mr}\n self.act = 'gelu' # activation layer {relu, prelu, leakyrelu, gelu, hswish}\n self.norm = 'batch' # batch or instance normalization {batch, 
instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.blocks = [2,2,18,2] # number of basic blocks in the backbone\n self.channels = [128, 256, 512, 1024] # number of channels of deep features\n self.n_classes = num_classes # Dimension of out_channels\n self.emb_dims = 1024 # Dimension of embeddings\n\n opt = OptInit(**kwargs)\n model = DeepGCN(opt)\n model.default_cfg = default_cfgs['vig_b_224_gelu']\n return model" }, { "identifier": "maxvit_tiny_rw_224", "path": "lib/maxxvit_4out.py", "snippet": "@register_model\ndef maxvit_tiny_rw_224(pretrained=False, **kwargs):\n return _create_maxxvit('maxvit_tiny_rw_224', pretrained=pretrained, **kwargs)" }, { "identifier": "maxvit_rmlp_tiny_rw_256", "path": "lib/maxxvit_4out.py", "snippet": "@register_model\ndef maxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs):\n return _create_maxxvit('maxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs)" }, { "identifier": "maxxvit_rmlp_small_rw_256", "path": "lib/maxxvit_4out.py", "snippet": "@register_model\ndef maxxvit_rmlp_small_rw_256(pretrained=False, **kwargs):\n return _create_maxxvit('maxxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs)" }, { "identifier": "maxvit_rmlp_small_rw_224", "path": "lib/maxxvit_4out.py", "snippet": "@register_model\ndef maxvit_rmlp_small_rw_224(pretrained=False, **kwargs):\n return _create_maxxvit('maxvit_rmlp_small_rw_224', pretrained=pretrained, **kwargs)" } ]
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import timm import logging from scipy import ndimage from lib.pvtv2 import pvt_v2_b2, pvt_v2_b5, pvt_v2_b0 from lib.decoders import CUP, CASCADE, CASCADE_Cat, GCUP, GCUP_Cat, GCASCADE, GCASCADE_Cat from lib.pyramid_vig import pvig_ti_224_gelu, pvig_s_224_gelu, pvig_m_224_gelu, pvig_b_224_gelu from lib.maxxvit_4out import maxvit_tiny_rw_224 as maxvit_tiny_rw_224_4out from lib.maxxvit_4out import maxvit_rmlp_tiny_rw_256 as maxvit_rmlp_tiny_rw_256_4out from lib.maxxvit_4out import maxxvit_rmlp_small_rw_256 as maxxvit_rmlp_small_rw_256_4out from lib.maxxvit_4out import maxvit_rmlp_small_rw_224 as maxvit_rmlp_small_rw_224_4out
13376
# Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'): super(PVT_GCASCADE, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive': self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) elif self.skip_aggregation == 'concatenation': self.decoder = GCASCADE_Cat(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2] else: print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. 
Continuing with the default additive aggregation.') self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) print('Model %s created, param count: %d' % ('GCASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class MERIT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size_s1=(256,256), img_size_s2=(224,224), k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', interpolation='bilinear', skip_aggregation='additive'): super(MERIT_GCASCADE, self).__init__() self.interpolation = interpolation self.img_size_s1 = img_size_s1 self.img_size_s2 = img_size_s2 self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone1 = maxxvit_rmlp_small_rw_256_4out() # [64, 128, 320, 512]
logger = logging.getLogger(__name__) def np2th(weights, conv=False): """Possibly convert HWIO to OIHW.""" if conv: weights = weights.transpose([3, 2, 0, 1]) return torch.from_numpy(weights) class PVT_CUP(nn.Module): def __init__(self, n_class=1): super(PVT_CUP, self).__init__() # conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) # decoder initialization self.decoder = CUP(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CUP decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_CASCADE(nn.Module): def __init__(self, n_class=1): super(PVT_CASCADE, self).__init__() # conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) # decoder initialization self.decoder = CASCADE(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_CASCADE_Cat(nn.Module): def __init__(self, n_class=1): super(PVT_CASCADE_Cat, self).__init__() 
# conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) print('Model %s created, param count: %d' % ('PVT backbone: ', sum([m.numel() for m in self.backbone.parameters()]))) # decoder initialization self.decoder = CASCADE_Cat(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CASCADE_Cat decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCUP(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'): super(PVT_GCUP, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive': self.decoder = GCUP(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) elif self.skip_aggregation == 'concatenation': self.decoder = GCUP_Cat(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2] else: print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. 
Continuing with the default additive aggregation.') self.decoder = GCUP(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) print('Model %s created, param count: %d' % ('GCUP_decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'): super(PVT_GCASCADE, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive': self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) elif self.skip_aggregation == 'concatenation': self.decoder = GCASCADE_Cat(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2] else: print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. 
Continuing with the default additive aggregation.') self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) print('Model %s created, param count: %d' % ('GCASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class MERIT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size_s1=(256,256), img_size_s2=(224,224), k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', interpolation='bilinear', skip_aggregation='additive'): super(MERIT_GCASCADE, self).__init__() self.interpolation = interpolation self.img_size_s1 = img_size_s1 self.img_size_s2 = img_size_s2 self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone1 = maxxvit_rmlp_small_rw_256_4out() # [64, 128, 320, 512]
self.backbone2 = maxvit_rmlp_small_rw_224_4out() # [64, 128, 320, 512]
9
2023-10-24 17:49:10+00:00
16k
boppreh/hello_tls
src/hello_tls/scan.py
[ { "identifier": "ClientHello", "path": "src/hello_tls/protocol.py", "snippet": "class ScanError(Exception):\nclass ServerAlertError(ScanError):\nclass BadServerResponse(ScanError):\nclass ServerHello:\nclass ClientHello:\n def __init__(self, level: AlertLevel, description: AlertDescription):\ndef _make_stream_parser(packets: Iterable[bytes]) -> Tuple[Callable[[int], bytes], Callable[[], int]]:\n def read_next(length: int) -> bytes:\ndef _bytes_to_int(b: bytes) -> int:\ndef parse_server_hello(packets: Iterable[bytes]) -> ServerHello:\ndef make_client_hello(client_hello: ClientHello) -> bytes:\n def prefix_length(block_name: str, width_bytes: int = 2) -> Iterator[None]:" }, { "identifier": "AlertDescription", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class AlertDescription(Enum):\n \"\"\" Different alert messages that can be sent by the server. \"\"\"\n close_notify = b'\\x00'\n unexpected_message = b'\\x0a'\n bad_record_mac = b'\\x14'\n record_overflow = b'\\x16'\n handshake_failure = b'\\x28'\n bad_certificate = b'\\x2a'\n unsupported_certificate = b'\\x2b'\n certificate_revoked = b'\\x2c'\n certificate_expired = b'\\x2d'\n certificate_unknown = b'\\x2e'\n illegal_parameter = b'\\x2f'\n unknown_ca = b'\\x30'\n access_denied = b'\\x31'\n decode_error = b'\\x32'\n decrypt_error = b'\\x33'\n protocol_version = b'\\x46'\n insufficient_security = b'\\x47'\n internal_error = b'\\x50'\n inappropriate_fallback = b'\\x56'\n user_canceled = b'\\x5a'\n missing_extension = b'\\x6d'\n unsupported_extension = b'\\x6e'\n unrecognized_name = b'\\x70'\n bad_certificate_status_response = b'\\x71'\n unknown_psk_identity = b'\\x73'\n certificate_required = b'\\x74'\n no_application_protocol = b'\\x78'" }, { "identifier": "CipherSuite", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class CipherSuite(Enum):\n def __repr__(self):\n return self.name\n def __new__(cls, value, *rest, **kwds):\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n # Annotate each cipher suite with the protocols it's supported at.\n # Default to all but TLS 1.3, because that's the most common.\n def __init__(self, _: bytes, protocols: Sequence[Protocol] = (Protocol.SSLv3, Protocol.TLS1_0, Protocol.TLS1_1, Protocol.TLS1_2)):\n self.protocols = protocols\n\n # Pseudo cipher suite, not actually picked.\n #TLS_EMPTY_RENEGOTIATION_INFO_SCSV = b\"\\x00\\xff\"\n\n # TLS 1.3 cipher suites.\n TLS_AES_128_GCM_SHA256 = b\"\\x13\\x01\", (Protocol.TLS1_3,)\n TLS_AES_256_GCM_SHA384 = b\"\\x13\\x02\", (Protocol.TLS1_3,)\n TLS_CHACHA20_POLY1305_SHA256 = b\"\\x13\\x03\", (Protocol.TLS1_3,)\n TLS_AES_128_CCM_SHA256 = b\"\\x13\\x04\", (Protocol.TLS1_3,)\n TLS_AES_128_CCM_8_SHA256 = b\"\\x13\\x05\", (Protocol.TLS1_3,)\n\n # Cipher suite that had its number reassigned.\n OLD_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xcc\\x13'\n \n # Cipher suites adapted from IANA assignments:\n # https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-4\n TLS_AEGIS_128L_SHA256 = b'\\x13\\x07' # [draft-irtf-cfrg-aegis-aead-00]\n TLS_AEGIS_256_SHA384 = b'\\x13\\x06' # [draft-irtf-cfrg-aegis-aead-00]\n TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x19' # [RFC4346]\n TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x17' # [RFC4346][RFC6347]\n TLS_DH_anon_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x1B' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_CBC_SHA = b'\\x00\\x34' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_CBC_SHA256 = b'\\x00\\x6C' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA6' # [RFC5288]\n 
TLS_DH_anon_WITH_AES_256_CBC_SHA = b'\\x00\\x3A' # [RFC5246]\n TLS_DH_anon_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6D' # [RFC5246]\n TLS_DH_anon_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA7' # [RFC5288]\n TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x46' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5A' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x47' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5B' # [RFC6209]\n TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x46' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBF' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x84' # [RFC6367]\n TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x89' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC5' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x85' # [RFC6367]\n TLS_DH_anon_WITH_DES_CBC_SHA = b'\\x00\\x1A' # [RFC8996]\n TLS_DH_anon_WITH_RC4_128_MD5 = b'\\x00\\x18' # [RFC5246][RFC6347]\n TLS_DH_anon_WITH_SEED_CBC_SHA = b'\\x00\\x9B' # [RFC4162]\n TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x0B' # [RFC4346]\n TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x0D' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_CBC_SHA = b'\\x00\\x30' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3E' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA4' # [RFC5288]\n TLS_DH_DSS_WITH_AES_256_CBC_SHA = b'\\x00\\x36' # [RFC5246]\n TLS_DH_DSS_WITH_AES_256_CBC_SHA256 = b'\\x00\\x68' # [RFC5246]\n TLS_DH_DSS_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA5' # [RFC5288]\n TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x3E' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x58' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x3F' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x59' # [RFC6209]\n TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x42' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBB' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x82' # [RFC6367]\n TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x85' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC1' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x83' # [RFC6367]\n TLS_DH_DSS_WITH_DES_CBC_SHA = b'\\x00\\x0C' # [RFC8996]\n TLS_DH_DSS_WITH_SEED_CBC_SHA = b'\\x00\\x97' # [RFC4162]\n TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x0E' # [RFC4346]\n TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x10' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x31' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3F' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA0' # [RFC5288]\n TLS_DH_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x37' # [RFC5246]\n TLS_DH_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x69' # [RFC5246]\n TLS_DH_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA1' # [RFC5288]\n TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x40' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x54' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x41' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x55' # [RFC6209]\n TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x43' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBC' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7E' # [RFC6367]\n TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x86' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC2' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 
b'\\xC0\\x7F' # [RFC6367]\n TLS_DH_RSA_WITH_DES_CBC_SHA = b'\\x00\\x0F' # [RFC8996]\n TLS_DH_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x98' # [RFC4162]\n TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x11' # [RFC4346]\n TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x13' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_CBC_SHA = b'\\x00\\x32' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = b'\\x00\\x40' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA2' # [RFC5288]\n TLS_DHE_DSS_WITH_AES_256_CBC_SHA = b'\\x00\\x38' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6A' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA3' # [RFC5288]\n TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x42' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x56' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x43' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x57' # [RFC6209]\n TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x44' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBD' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x80' # [RFC6367]\n TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x87' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC3' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x81' # [RFC6367]\n TLS_DHE_DSS_WITH_DES_CBC_SHA = b'\\x00\\x12' # [RFC8996]\n TLS_DHE_DSS_WITH_SEED_CBC_SHA = b'\\x00\\x99' # [RFC4162]\n TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x8F' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x90' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xB2' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_128_CCM = b'\\xC0\\xA6' # [RFC6655]\n TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xAA' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x91' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xB3' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_256_CCM = b'\\xC0\\xA7' # [RFC6655]\n TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xAB' # [RFC5487]\n TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x66' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6C' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x67' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6D' # [RFC6209]\n TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x96' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x90' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x97' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x91' # [RFC6367]\n TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAD' # [RFC7905]\n TLS_DHE_PSK_WITH_NULL_SHA = b'\\x00\\x2D' # [RFC4785]\n TLS_DHE_PSK_WITH_NULL_SHA256 = b'\\x00\\xB4' # [RFC5487]\n TLS_DHE_PSK_WITH_NULL_SHA384 = b'\\x00\\xB5' # [RFC5487]\n TLS_DHE_PSK_WITH_RC4_128_SHA = b'\\x00\\x8E' # [RFC4279][RFC6347]\n TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x14' # [RFC4346]\n TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x16' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x33' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x67' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CCM = b'\\xC0\\x9E' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_128_CCM_8 = b'\\xC0\\xA2' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\x9E' # [RFC5288]\n TLS_DHE_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x39' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6B' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_256_CCM = b'\\xC0\\x9F' # [RFC6655]\n 
TLS_DHE_RSA_WITH_AES_256_CCM_8 = b'\\xC0\\xA3' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\x9F' # [RFC5288]\n TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x44' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x52' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x45' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x53' # [RFC6209]\n TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x45' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBE' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7C' # [RFC6367]\n TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x88' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC4' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7D' # [RFC6367]\n TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAA' # [RFC7905]\n TLS_DHE_RSA_WITH_DES_CBC_SHA = b'\\x00\\x15' # [RFC8996]\n TLS_DHE_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x9A' # [RFC4162]\n TLS_ECCPWD_WITH_AES_128_CCM_SHA256 = b'\\xC0\\xB2' # [RFC8492]\n TLS_ECCPWD_WITH_AES_128_GCM_SHA256 = b'\\xC0\\xB0' # [RFC8492]\n TLS_ECCPWD_WITH_AES_256_CCM_SHA384 = b'\\xC0\\xB3' # [RFC8492]\n TLS_ECCPWD_WITH_AES_256_GCM_SHA384 = b'\\xC0\\xB1' # [RFC8492]\n TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x17' # [RFC8422]\n TLS_ECDH_anon_WITH_AES_128_CBC_SHA = b'\\xC0\\x18' # [RFC8422]\n TLS_ECDH_anon_WITH_AES_256_CBC_SHA = b'\\xC0\\x19' # [RFC8422]\n TLS_ECDH_anon_WITH_NULL_SHA = b'\\xC0\\x15' # [RFC8422]\n TLS_ECDH_anon_WITH_RC4_128_SHA = b'\\xC0\\x16' # [RFC8422][RFC6347]\n TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x03' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x04' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x25' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2D' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x05' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x26' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x2E' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4A' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5E' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4B' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5F' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x74' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x88' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x75' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x89' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_NULL_SHA = b'\\xC0\\x01' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_RC4_128_SHA = b'\\xC0\\x02' # [RFC8422][RFC6347]\n TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x0D' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x0E' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x29' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x31' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x0F' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x2A' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x32' # [RFC5289]\n TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4E' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x62' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4F' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x63' # [RFC6209]\n TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 
b'\\xC0\\x78' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8C' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x79' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8D' # [RFC6367]\n TLS_ECDH_RSA_WITH_NULL_SHA = b'\\xC0\\x0B' # [RFC8422]\n TLS_ECDH_RSA_WITH_RC4_128_SHA = b'\\xC0\\x0C' # [RFC8422][RFC6347]\n TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x08' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x09' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x23' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_128_CCM = b'\\xC0\\xAC' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 = b'\\xC0\\xAE' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2B' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x0A' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x24' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_256_CCM = b'\\xC0\\xAD' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 = b'\\xC0\\xAF' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x2C' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x48' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5C' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x49' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5D' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x72' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x86' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x73' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x87' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xA9' # [RFC7905]\n TLS_ECDHE_ECDSA_WITH_NULL_SHA = b'\\xC0\\x06' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_RC4_128_SHA = b'\\xC0\\x07' # [RFC8422][RFC6347]\n TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x34' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = b'\\xC0\\x35' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x37' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256 = b'\\xD0\\x03' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256 = b'\\xD0\\x05' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256 = b'\\xD0\\x01' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = b'\\xC0\\x36' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x38' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384 = b'\\xD0\\x02' # [RFC8442]\n TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x70' # [RFC6209]\n TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x71' # [RFC6209]\n TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x9A' # [RFC6367]\n TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x9B' # [RFC6367]\n TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAC' # [RFC7905]\n TLS_ECDHE_PSK_WITH_NULL_SHA = b'\\xC0\\x39' # [RFC5489]\n TLS_ECDHE_PSK_WITH_NULL_SHA256 = b'\\xC0\\x3A' # [RFC5489]\n TLS_ECDHE_PSK_WITH_NULL_SHA384 = b'\\xC0\\x3B' # [RFC5489]\n TLS_ECDHE_PSK_WITH_RC4_128_SHA = b'\\xC0\\x33' # [RFC5489][RFC6347]\n TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x12' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x13' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x27' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2F' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x14' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x28' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 
b'\\xC0\\x30' # [RFC5289]\n TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4C' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x60' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4D' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x61' # [RFC6209]\n TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x76' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8A' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x77' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8B' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xA8' # [RFC7905]\n TLS_ECDHE_RSA_WITH_NULL_SHA = b'\\xC0\\x10' # [RFC8422]\n TLS_ECDHE_RSA_WITH_RC4_128_SHA = b'\\xC0\\x11' # [RFC8422][RFC6347]\n TLS_GOSTR341112_256_WITH_28147_CNT_IMIT = b'\\xC1\\x02' # [RFC9189]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_CTR_OMAC = b'\\xC1\\x00' # [RFC9189]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_MGM_L = b'\\xC1\\x03' # [RFC9367]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_MGM_S = b'\\xC1\\x05' # [RFC9367]\n TLS_GOSTR341112_256_WITH_MAGMA_CTR_OMAC = b'\\xC1\\x01' # [RFC9189]\n TLS_GOSTR341112_256_WITH_MAGMA_MGM_L = b'\\xC1\\x04' # [RFC9367]\n TLS_GOSTR341112_256_WITH_MAGMA_MGM_S = b'\\xC1\\x06' # [RFC9367]\n TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 = b'\\x00\\x29' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA = b'\\x00\\x26' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 = b'\\x00\\x2A' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA = b'\\x00\\x27' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x2B' # [RFC2712][RFC6347]\n TLS_KRB5_EXPORT_WITH_RC4_40_SHA = b'\\x00\\x28' # [RFC2712][RFC6347]\n TLS_KRB5_WITH_3DES_EDE_CBC_MD5 = b'\\x00\\x23' # [RFC2712]\n TLS_KRB5_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x1F' # [RFC2712]\n TLS_KRB5_WITH_DES_CBC_MD5 = b'\\x00\\x22' # [RFC2712]\n TLS_KRB5_WITH_DES_CBC_SHA = b'\\x00\\x1E' # [RFC2712]\n TLS_KRB5_WITH_IDEA_CBC_MD5 = b'\\x00\\x25' # [RFC2712]\n TLS_KRB5_WITH_IDEA_CBC_SHA = b'\\x00\\x21' # [RFC2712]\n TLS_KRB5_WITH_RC4_128_MD5 = b'\\x00\\x24' # [RFC2712][RFC6347]\n TLS_KRB5_WITH_RC4_128_SHA = b'\\x00\\x20' # [RFC2712][RFC6347]\n TLS_NULL_WITH_NULL_NULL = b'\\x00\\x00' # [RFC5246]\n TLS_PSK_DHE_WITH_AES_128_CCM_8 = b'\\xC0\\xAA' # [RFC6655]\n TLS_PSK_DHE_WITH_AES_256_CCM_8 = b'\\xC0\\xAB' # [RFC6655]\n TLS_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x8B' # [RFC4279]\n TLS_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x8C' # [RFC4279]\n TLS_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xAE' # [RFC5487]\n TLS_PSK_WITH_AES_128_CCM = b'\\xC0\\xA4' # [RFC6655]\n TLS_PSK_WITH_AES_128_CCM_8 = b'\\xC0\\xA8' # [RFC6655]\n TLS_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA8' # [RFC5487]\n TLS_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x8D' # [RFC4279]\n TLS_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xAF' # [RFC5487]\n TLS_PSK_WITH_AES_256_CCM = b'\\xC0\\xA5' # [RFC6655]\n TLS_PSK_WITH_AES_256_CCM_8 = b'\\xC0\\xA9' # [RFC6655]\n TLS_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA9' # [RFC5487]\n TLS_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x64' # [RFC6209]\n TLS_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6A' # [RFC6209]\n TLS_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x65' # [RFC6209]\n TLS_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6B' # [RFC6209]\n TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x94' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8E' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x95' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8F' # [RFC6367]\n 
TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAB' # [RFC7905]\n TLS_PSK_WITH_NULL_SHA = b'\\x00\\x2C' # [RFC4785]\n TLS_PSK_WITH_NULL_SHA256 = b'\\x00\\xB0' # [RFC5487]\n TLS_PSK_WITH_NULL_SHA384 = b'\\x00\\xB1' # [RFC5487]\n TLS_PSK_WITH_RC4_128_SHA = b'\\x00\\x8A' # [RFC4279][RFC6347]\n TLS_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x08' # [RFC4346]\n TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 = b'\\x00\\x06' # [RFC4346]\n TLS_RSA_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x03' # [RFC4346][RFC6347]\n TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x93' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x94' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xB6' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xAC' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x95' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xB7' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xAD' # [RFC5487]\n TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x68' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6E' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x69' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6F' # [RFC6209]\n TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x98' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x92' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x99' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x93' # [RFC6367]\n TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAE' # [RFC7905]\n TLS_RSA_PSK_WITH_NULL_SHA = b'\\x00\\x2E' # [RFC4785]\n TLS_RSA_PSK_WITH_NULL_SHA256 = b'\\x00\\xB8' # [RFC5487]\n TLS_RSA_PSK_WITH_NULL_SHA384 = b'\\x00\\xB9' # [RFC5487]\n TLS_RSA_PSK_WITH_RC4_128_SHA = b'\\x00\\x92' # [RFC4279][RFC6347]\n TLS_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x0A' # [RFC5246]\n TLS_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x2F' # [RFC5246]\n TLS_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3C' # [RFC5246]\n TLS_RSA_WITH_AES_128_CCM = b'\\xC0\\x9C' # [RFC6655]\n TLS_RSA_WITH_AES_128_CCM_8 = b'\\xC0\\xA0' # [RFC6655]\n TLS_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\x9C' # [RFC5288]\n TLS_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x35' # [RFC5246]\n TLS_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x3D' # [RFC5246]\n TLS_RSA_WITH_AES_256_CCM = b'\\xC0\\x9D' # [RFC6655]\n TLS_RSA_WITH_AES_256_CCM_8 = b'\\xC0\\xA1' # [RFC6655]\n TLS_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\x9D' # [RFC5288]\n TLS_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x3C' # [RFC6209]\n TLS_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x50' # [RFC6209]\n TLS_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x3D' # [RFC6209]\n TLS_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x51' # [RFC6209]\n TLS_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x41' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBA' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7A' # [RFC6367]\n TLS_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x84' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC0' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7B' # [RFC6367]\n TLS_RSA_WITH_DES_CBC_SHA = b'\\x00\\x09' # [RFC8996]\n TLS_RSA_WITH_IDEA_CBC_SHA = b'\\x00\\x07' # [RFC8996]\n TLS_RSA_WITH_NULL_MD5 = b'\\x00\\x01' # [RFC5246]\n TLS_RSA_WITH_NULL_SHA = b'\\x00\\x02' # [RFC5246]\n TLS_RSA_WITH_NULL_SHA256 = b'\\x00\\x3B' # [RFC5246]\n TLS_RSA_WITH_RC4_128_MD5 = b'\\x00\\x04' # [RFC5246][RFC6347]\n TLS_RSA_WITH_RC4_128_SHA = b'\\x00\\x05' # [RFC5246][RFC6347]\n TLS_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x96' # 
[RFC4162]\n TLS_SHA256_SHA256 = b'\\xC0\\xB4' # [RFC9150]\n TLS_SHA384_SHA384 = b'\\xC0\\xB5' # [RFC9150]\n TLS_SM4_CCM_SM3 = b'\\x00\\xC7' # [RFC8998]\n TLS_SM4_GCM_SM3 = b'\\x00\\xC6' # [RFC8998]\n TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1C' # [RFC5054]\n TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA = b'\\xC0\\x1F' # [RFC5054]\n TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA = b'\\xC0\\x22' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1B' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x1E' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x21' # [RFC5054]\n TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1A' # [RFC5054]\n TLS_SRP_SHA_WITH_AES_128_CBC_SHA = b'\\xC0\\x1D' # [RFC5054]\n TLS_SRP_SHA_WITH_AES_256_CBC_SHA = b'\\xC0\\x20' # [RFC5054]" }, { "identifier": "Group", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Group(Enum):\n def __new__(cls, value, *rest, **kwds):\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n # Annotate each group with whether it's a PQ group.\n def __init__(self, _: bytes, is_pq: bool = False):\n self.is_pq = is_pq\n def __repr__(self):\n return self.name\n \n sect163k1 = b'\\x00\\x01'\n sect163r1 = b'\\x00\\x02'\n sect163r2 = b'\\x00\\x03'\n sect193r1 = b'\\x00\\x04'\n sect193r2 = b'\\x00\\x05'\n sect233k1 = b'\\x00\\x06'\n sect233r1 = b'\\x00\\x07'\n sect239k1 = b'\\x00\\x08'\n sect283k1 = b'\\x00\\x09'\n sect283r1 = b'\\x00\\x0a'\n sect409k1 = b'\\x00\\x0b'\n sect409r1 = b'\\x00\\x0c'\n sect571k1 = b'\\x00\\x0d'\n sect571r1 = b'\\x00\\x0e'\n secp160k1 = b'\\x00\\x0f'\n secp160r1 = b'\\x00\\x10'\n secp160r2 = b'\\x00\\x11'\n secp192k1 = b'\\x00\\x12'\n secp192r1 = b'\\x00\\x13'\n secp224k1 = b'\\x00\\x14'\n secp224r1 = b'\\x00\\x15'\n secp256k1 = b'\\x00\\x16'\n secp256r1 = b'\\x00\\x17'\n secp384r1 = b'\\x00\\x18'\n secp521r1 = b'\\x00\\x19'\n brainpoolP256r1 = b'\\x00\\x1a'\n brainpoolP384r1 = b'\\x00\\x1b'\n brainpoolP512r1 = b'\\x00\\x1c'\n x25519 = b'\\x00\\x1d'\n x448 = b'\\x00\\x1e'\n brainpoolP256r1tls13 = b'\\x00\\x1f'\n brainpoolP384r1tls13 = b'\\x00\\x20'\n brainpoolP512r1tls13 = b'\\x00\\x21'\n GC256A = b'\\x00\\x22'\n GC256B = b'\\x00\\x23'\n GC256C = b'\\x00\\x24'\n GC256D = b'\\x00\\x25'\n GC512A = b'\\x00\\x26'\n GC512B = b'\\x00\\x27'\n GC512C = b'\\x00\\x28'\n curveSM2 = b'\\x00\\x29'\n ffdhe2048 = b'\\x01\\x00'\n ffdhe3072 = b'\\x01\\x01'\n ffdhe4096 = b'\\x01\\x02'\n ffdhe6144 = b'\\x01\\x03'\n ffdhe8192 = b'\\x01\\x04'\n arbitrary_explicit_prime_curves = b'\\xff\\x01'\n arbitrary_explicit_char2_curves = b'\\xff\\x02'\n\n # Somewhat common post-quantum groups, not yet standardized:\n X25519Kyber768Draft00 = b'\\x63\\x99', True\n X25519Kyber768Draft00_obsolete = b'\\xfe\\x31', True\n X25519Kyber512Draft00 = b'\\xfe\\x30', True\n SecP256r1Kyber768Draft00 = b'\\x63\\x9a', True\n\n # Long list of unusual post-quantum groups from liboqs:\n # https://github.com/open-quantum-safe/oqs-provider/blob/main/ALGORITHMS.md?plain=1#L13\n frodo640aes = b'\\x02\\x00', True\n p256_frodo640aes = b'\\x2F\\x00', True\n x25519_frodo640aes = b'\\x2F\\x80', True\n frodo640shake = b'\\x02\\x01', True\n p256_frodo640shake = b'\\x2F\\x01', True\n x25519_frodo640shake = b'\\x2F\\x81', True\n frodo976aes = b'\\x02\\x02', True\n p384_frodo976aes = b'\\x2F\\x02', True\n x448_frodo976aes = b'\\x2F\\x82', True\n frodo976shake = b'\\x02\\x03', True\n p384_frodo976shake = b'\\x2F\\x03', True\n x448_frodo976shake = b'\\x2F\\x83', True\n frodo1344aes = b'\\x02\\x04', True\n p521_frodo1344aes = 
b'\\x2F\\x04', True\n frodo1344shake = b'\\x02\\x05', True\n p521_frodo1344shake = b'\\x2F\\x05', True\n kyber512 = b'\\x02\\x3A', True\n p256_kyber512 = b'\\x2F\\x3A', True\n x25519_kyber512 = b'\\x2F\\x39', True\n kyber768 = b'\\x02\\x3C', True\n p384_kyber768 = b'\\x2F\\x3C', True\n x448_kyber768 = b'\\x2F\\x90', True\n kyber1024 = b'\\x02\\x3D', True\n p521_kyber1024 = b'\\x2F\\x3D', True\n bikel1 = b'\\x02\\x41', True\n p256_bikel1 = b'\\x2F\\x41', True\n x25519_bikel1 = b'\\x2F\\xAE', True\n bikel3 = b'\\x02\\x42', True\n p384_bikel3 = b'\\x2F\\x42', True\n x448_bikel3 = b'\\x2F\\xAF', True\n bikel5 = b'\\x02\\x43', True\n p521_bikel5 = b'\\x2F\\x43', True\n hqc128 = b'\\x02\\x2C', True\n p256_hqc128 = b'\\x2F\\x2C', True\n x25519_hqc128 = b'\\x2F\\xAC', True\n hqc192 = b'\\x02\\x2D', True\n p384_hqc192 = b'\\x2F\\x2D', True\n x448_hqc192 = b'\\x2F\\xAD', True\n hqc256 = b'\\x02\\x2E', True\n p521_hqc256 = b'\\x2F\\x2E', True\n dilithium2 = b'\\xfe\\xa0', True\n p256_dilithium2 = b'\\xfe\\xa1', True\n rsa3072_dilithium2 = b'\\xfe\\xa2', True\n dilithium3 = b'\\xfe\\xa3', True\n p384_dilithium3 = b'\\xfe\\xa4', True\n dilithium5 = b'\\xfe\\xa5', True\n p521_dilithium5 = b'\\xfe\\xa6', True\n falcon512 = b'\\xfe\\xae', True\n p256_falcon512 = b'\\xfe\\xaf', True\n rsa3072_falcon512 = b'\\xfe\\xb0', True\n falcon1024 = b'\\xfe\\xb1', True\n p521_falcon1024 = b'\\xfe\\xb2', True\n sphincssha2128fsimple = b'\\xfe\\xb3', True\n p256_sphincssha2128fsimple = b'\\xfe\\xb4', True\n rsa3072_sphincssha2128fsimple = b'\\xfe\\xb5', True\n sphincssha2128ssimple = b'\\xfe\\xb6', True\n p256_sphincssha2128ssimple = b'\\xfe\\xb7', True\n rsa3072_sphincssha2128ssimple = b'\\xfe\\xb8', True\n sphincssha2192fsimple = b'\\xfe\\xb9', True\n p384_sphincssha2192fsimple = b'\\xfe\\xba', True\n sphincssha2192ssimple = b'\\xfe\\xbb', True\n p384_sphincssha2192ssimple = b'\\xfe\\xbc', True\n sphincssha2256fsimple = b'\\xfe\\xbd', True\n p521_sphincssha2256fsimple = b'\\xfe\\xbe', True\n sphincssha2256ssimple = b'\\xfe\\xc0', True\n p521_sphincssha2256ssimple = b'\\xfe\\xc1', True\n sphincsshake128fsimple = b'\\xfe\\xc2', True\n p256_sphincsshake128fsimple = b'\\xfe\\xc3', True\n rsa3072_sphincsshake128fsimple = b'\\xfe\\xc4', True\n sphincsshake128ssimple = b'\\xfe\\xc5', True\n p256_sphincsshake128ssimple = b'\\xfe\\xc6', True\n rsa3072_sphincsshake128ssimple = b'\\xfe\\xc7', True\n sphincsshake192fsimple = b'\\xfe\\xc8', True\n p384_sphincsshake192fsimple = b'\\xfe\\xc9', True\n sphincsshake192ssimple = b'\\xfe\\xca', True\n p384_sphincsshake192ssimple = b'\\xfe\\xcb', True\n sphincsshake256fsimple = b'\\xfe\\xcc', True\n p521_sphincsshake256fsimple = b'\\xfe\\xcd', True\n sphincsshake256ssimple = b'\\xfe\\xce', True\n p521_sphincsshake256ssimple = b'\\xfe\\xcf', True" }, { "identifier": "Protocol", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Protocol(Enum):\n # Keep protocols in order of preference.\n TLS1_3 = b\"\\x03\\x04\"\n TLS1_2 = b\"\\x03\\x03\"\n TLS1_1 = b\"\\x03\\x02\"\n TLS1_0 = b\"\\x03\\x01\"\n SSLv3 = b\"\\x03\\x00\"\n\n def __repr__(self):\n return self.name\n def __lt__(self, other):\n if self.__class__ != other.__class__:\n return NotImplemented\n return self.value < other.value" }, { "identifier": "CompressionMethod", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class CompressionMethod(Enum):\n NULL = b'\\x00'\n DEFLATE = b'\\x01'" } ]
from enum import Enum from multiprocessing.pool import ThreadPool from typing import Iterable, Union, List, Optional, Iterator, Callable, Any from urllib.parse import urlparse from datetime import datetime, timezone from .protocol import ClientHello, ScanError, make_client_hello, parse_server_hello, ServerAlertError, BadServerResponse, ServerHello, logger from .names_and_numbers import AlertDescription, CipherSuite, Group, Protocol, CompressionMethod from OpenSSL import SSL, crypto import socket import re import dataclasses import ssl, select
14,182
# Default number of workers/threads/concurrent connections to use. DEFAULT_MAX_WORKERS: int = 6 # Default socket connection timeout, in seconds. DEFAULT_TIMEOUT: float = 2 class DowngradeError(ScanError): """ Error for servers that attempt to downgrade beyond supported versions. """ pass class ConnectionError(ScanError): """ Class for error in resolving or connecting to a server. """ pass class ProxyError(ConnectionError): """ Class for errors in connecting through a proxy. """ pass @dataclasses.dataclass class ConnectionSettings: """ Settings for a connection to a server, including the host, port, and proxy. """ host: str port: int = 443 proxy: Optional[str] = None timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0)) def make_socket(settings: ConnectionSettings) -> socket.socket: """ Creates and connects a socket to the target server, through the chosen proxy if any. """ socket_host, socket_port = None, None # To appease the type checker. try: if not settings.proxy: socket_host, socket_port = settings.host, settings.port return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) if not settings.proxy.startswith('http://'): raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy) socket_host, socket_port = parse_target(settings.proxy, 80) sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8')) sock_file = sock.makefile('r', newline='\r\n') line = sock_file.readline() if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line): sock_file.close() sock.close() raise ProxyError("Proxy refused the connection: ", line) while True: if sock_file.readline() == '\r\n': break return sock except TimeoutError as e: raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e except socket.gaierror as e: raise ConnectionError(f"Could not resolve host {socket_host}") from e except socket.error as e: raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e
# Default number of workers/threads/concurrent connections to use. DEFAULT_MAX_WORKERS: int = 6 # Default socket connection timeout, in seconds. DEFAULT_TIMEOUT: float = 2 class DowngradeError(ScanError): """ Error for servers that attempt to downgrade beyond supported versions. """ pass class ConnectionError(ScanError): """ Class for error in resolving or connecting to a server. """ pass class ProxyError(ConnectionError): """ Class for errors in connecting through a proxy. """ pass @dataclasses.dataclass class ConnectionSettings: """ Settings for a connection to a server, including the host, port, and proxy. """ host: str port: int = 443 proxy: Optional[str] = None timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0)) def make_socket(settings: ConnectionSettings) -> socket.socket: """ Creates and connects a socket to the target server, through the chosen proxy if any. """ socket_host, socket_port = None, None # To appease the type checker. try: if not settings.proxy: socket_host, socket_port = settings.host, settings.port return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) if not settings.proxy.startswith('http://'): raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy) socket_host, socket_port = parse_target(settings.proxy, 80) sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8')) sock_file = sock.makefile('r', newline='\r\n') line = sock_file.readline() if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line): sock_file.close() sock.close() raise ProxyError("Proxy refused the connection: ", line) while True: if sock_file.readline() == '\r\n': break return sock except TimeoutError as e: raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e except socket.gaierror as e: raise ConnectionError(f"Could not resolve host {socket_host}") from e except socket.error as e: raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e
def send_hello(connection_settings: ConnectionSettings, client_hello: ClientHello) -> ServerHello:
0
2023-10-21 02:00:13+00:00
16k
YefanZhou/TempBalance
object_detection/src/YOLOv8/ultralytics/vit/sam/modules/mask_generator.py
[ { "identifier": "MaskData", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n \"\"\"Initialize a MaskData object, ensuring all values are supported types.\"\"\"\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)), 'MaskData only supports list, numpy arrays, and torch tensors.'\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n \"\"\"Set an item in the MaskData object, ensuring it is a supported type.\"\"\"\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)), 'MaskData only supports list, numpy arrays, and torch tensors.'\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n \"\"\"Delete an item from the MaskData object.\"\"\"\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n \"\"\"Get an item from the MaskData object.\"\"\"\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n \"\"\"Return an ItemsView of the MaskData object.\"\"\"\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n \"\"\"Filter the MaskData object based on the given boolean tensor.\"\"\"\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f'MaskData key {k} has an unsupported type {type(v)}.')\n\n def cat(self, new_stats: 'MaskData') -> None:\n \"\"\"Concatenate a new MaskData object to the current one.\"\"\"\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f'MaskData key {k} has an unsupported type {type(v)}.')\n\n def to_numpy(self) -> None:\n \"\"\"Convert all torch tensors in the MaskData object to numpy arrays.\"\"\"\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n \"\"\"Calculate the area of a mask from its uncompressed RLE.\"\"\"\n return sum(rle['counts'][1::2])" }, { "identifier": "batch_iterator", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n \"\"\"Yield batches of data from the input arguments.\"\"\"\n assert args and all(len(a) == len(args[0]) for a in args), 'Batched iteration must have same-size inputs.'\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size:(b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", 
"path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n masks = masks.flatten(0, -3) if len(shape) > 2 else masks.unsqueeze(0)\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n return out.reshape(*shape[:-2], 4) if len(shape) > 2 else out[0]" }, { "identifier": "box_xyxy_to_xywh", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n \"\"\"Convert bounding boxes from XYXY format to XYWH format.\"\"\"\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> List[np.ndarray]:\n \"\"\"Generate point grids for all crop layers.\"\"\"\n return [build_point_grid(int(n_per_side / (scale_per_layer ** i))) for i in range(n_layers + 1)]" }, { "identifier": "calculate_stability_score", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, threshold_offset: float) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. 
The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = ((masks > (mask_threshold + threshold_offset)).sum(-1, dtype=torch.int16).sum(-1,\n dtype=torch.int32))\n unions = ((masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32))\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Encode uncompressed RLE (run-length encoding) to COCO RLE format.\"\"\"\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle['size']\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle['counts'] = rle['counts'].decode('utf-8') # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def generate_crop_boxes(im_size: Tuple[int, ...], n_layers: int,\n overlap_ratio: float) -> Tuple[List[List[int]], List[int]]:\n \"\"\"Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.\"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n \"\"\"Crops bounding boxes to the size of the input image.\"\"\"\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def is_box_near_crop_edge(boxes: torch.Tensor,\n crop_box: List[int],\n orig_box: List[int],\n atol: float = 20.0) -> torch.Tensor:\n \"\"\"Return a boolean tensor indicating if boxes are near the crop edge.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"Encode masks as uncompressed RLEs in the format expected by pycocotools.\"\"\"\n # Put in fortran order 
and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat([\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device), ])\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({'size': [h, w], 'counts': counts})\n return out" }, { "identifier": "remove_small_regions", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> Tuple[np.ndarray, bool]:\n \"\"\"Remove small disconnected regions or holes in a mask, returning the mask and a modification indicator.\"\"\"\n import cv2 # type: ignore\n\n assert mode in {'holes', 'islands'}\n correct_holes = mode == 'holes'\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if not small_regions:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if not fill_labels:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle['size']\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle['counts']:\n mask[idx:idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n \"\"\"Uncrop bounding boxes by adding the crop box offset.\"\"\"\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def uncrop_masks(masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int) -> torch.Tensor:\n \"\"\"Uncrop masks by padding them to the original image size.\"\"\"\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n \"\"\"Uncrop points by adding the crop box offset.\"\"\"\n x0, y0, _, _ = 
crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" }, { "identifier": "PromptPredictor", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/modules/prompt_predictor.py", "snippet": "class PromptPredictor:\n\n def __init__(self, sam_model: Sam) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(self, image: np.ndarray, image_format: str = 'RGB') -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in {'RGB', 'BGR'}, f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(self, transformed_image: torch.Tensor, original_image_size: Tuple[int, ...]) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n if len(transformed_image.shape) != 4 \\\n or transformed_image.shape[1] != 3 \\\n or max(*transformed_image.shape[2:]) != self.model.image_encoder.img_size:\n raise ValueError('set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.')\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray, None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray, None): A length N array of labels for the\n point prompts. 
1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray, None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError('An image must be set with .set_image(...) before mask prediction.')\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (point_labels is not None), 'point_labels must be supplied if point_coords is supplied.'\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor, None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor, None): A BxN array of labels for the\n point prompts. 
1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray, None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError('An image must be set with .set_image(...) before mask prediction.')\n\n points = (point_coords, point_labels) if point_coords is not None else None\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError('An image must be set with .set_image(...) 
to generate an embedding.')\n assert self.features is not None, 'Features must exist if an image has been set.'\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "Sam", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/modules/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = 'RGB'\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer('pixel_mean', torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer('pixel_std', torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. 
Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x['image']) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if 'point_coords' in image_record:\n points = (image_record['point_coords'], image_record['point_labels'])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get('boxes', None),\n masks=image_record.get('mask_inputs', None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record['image'].shape[-2:],\n original_size=image_record['original_size'],\n )\n masks = masks > self.mask_threshold\n outputs.append({\n 'masks': masks,\n 'iou_predictions': iou_predictions,\n 'low_res_logits': low_res_masks, })\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode='bilinear',\n align_corners=False,\n )\n masks = masks[..., :input_size[0], :input_size[1]]\n masks = F.interpolate(masks, original_size, mode='bilinear', align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n return F.pad(x, (0, padw, 0, padh))" } ]
from typing import Any, Dict, List, Optional, Tuple from torchvision.ops.boxes import batched_nms, box_area # type: ignore from ..amg import (MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points) from .prompt_predictor import PromptPredictor from .sam import Sam from pycocotools import mask as mask_utils # type: ignore # noqa: F401 import numpy as np import torch import cv2 # type: ignore # noqa: F401
11,323
return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data['crop_boxes']) scores = scores.to(data['boxes'].device) keep_by_nms = batched_nms( data['boxes'].float(), scores, torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points, ) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data['boxes'].float(), data['iou_preds'], torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data['boxes'] = uncrop_boxes_xyxy(data['boxes'], crop_box) data['points'] = uncrop_points(data['points'], crop_box) data['crop_boxes'] = torch.tensor([crop_box for _ in range(len(data['rles']))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data['iou_preds'] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data['stability_score'] = calculate_stability_score(data['masks'], self.predictor.model.mask_threshold, self.stability_score_offset) if self.stability_score_thresh > 0.0: keep_mask = data['stability_score'] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data['masks'] = data['masks'] > self.predictor.model.mask_threshold data['boxes'] = batched_mask_to_box(data['masks']) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge(data['boxes'], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE data['masks'] = 
uncrop_masks(data['masks'], crop_box, orig_h, orig_w)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = 'binary_mask', ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int, None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray), None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != (point_grids is None), \ 'Exactly one of points_per_side or point_grid must be provided.' 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in {'binary_mask', 'uncompressed_rle', 'coco_rle'}, f'Unknown output_mode {output_mode}.' if output_mode == 'coco_rle': if min_mask_region_area > 0: self.predictor = PromptPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode # TODO: Temporary implementation for compatibility def __call__(self, image: np.ndarray, augment=False, visualize=False) -> List[Dict[str, Any]]: return self.generate(image) @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any), np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == 'coco_rle': mask_data['segmentations'] = [coco_encode_rle(rle) for rle in mask_data['rles']] elif self.output_mode == 'binary_mask': mask_data['segmentations'] = [rle_to_mask(rle) for rle in mask_data['rles']] else: mask_data['segmentations'] = mask_data['rles'] # Write mask records curr_anns = [] for idx in range(len(mask_data['segmentations'])): ann = { 'segmentation': mask_data['segmentations'][idx], 'area': area_from_rle(mask_data['rles'][idx]), 'bbox': box_xyxy_to_xywh(mask_data['boxes'][idx]).tolist(), 'predicted_iou': mask_data['iou_preds'][idx].item(), 'point_coords': [mask_data['points'][idx].tolist()], 'stability_score': mask_data['stability_score'][idx].item(), 'crop_box': box_xyxy_to_xywh(mask_data['crop_boxes'][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data['crop_boxes']) scores = scores.to(data['boxes'].device) keep_by_nms = batched_nms( data['boxes'].float(), scores, torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points, ) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data['boxes'].float(), data['iou_preds'], torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data['boxes'] = uncrop_boxes_xyxy(data['boxes'], crop_box) data['points'] = uncrop_points(data['points'], crop_box) data['crop_boxes'] = torch.tensor([crop_box for _ in range(len(data['rles']))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data['iou_preds'] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data['stability_score'] = calculate_stability_score(data['masks'], self.predictor.model.mask_threshold, self.stability_score_offset) if self.stability_score_thresh > 0.0: keep_mask = data['stability_score'] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data['masks'] = data['masks'] > self.predictor.model.mask_threshold data['boxes'] = batched_mask_to_box(data['masks']) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge(data['boxes'], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE data['masks'] = uncrop_masks(data['masks'], crop_box, orig_h, orig_w)
data['rles'] = mask_to_rle_pytorch(data['masks'])
10
2023-10-24 00:45:55+00:00
16k
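The SAM record above filters candidate masks by a stability score, which its docstring describes as the robustness of a mask to shifting the cutoff used to binarize the model's logits. A minimal sketch of that idea, assuming the mask logits sit in a plain torch tensor; the helper name, shapes, and toy inputs below are illustrative and are not taken from the record.

    import torch

    def stability_score_sketch(mask_logits: torch.Tensor,
                               mask_threshold: float = 0.0,
                               offset: float = 1.0) -> torch.Tensor:
        # Binarize at a raised and a lowered cutoff; a mask whose area barely
        # changes between the two cutoffs is considered stable.
        high = mask_logits > (mask_threshold + offset)
        low = mask_logits > (mask_threshold - offset)
        intersections = high.sum(dim=(-1, -2)).float()
        unions = low.sum(dim=(-1, -2)).float().clamp(min=1.0)
        return intersections / unions

    # Toy usage: keep only masks that clear a threshold like stability_score_thresh.
    logits = torch.randn(4, 64, 64) * 3.0  # 4 candidate mask logit maps (made up)
    scores = stability_score_sketch(logits, mask_threshold=0.0, offset=1.0)
    keep = scores >= 0.95
    print(scores, keep)

Filtering on this score, together with the predicted-IoU filter, forms the two per-batch quality gates the generator applies before thresholding masks and running NMS.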
bytedance/ColTrack
models/dino/dino.py
[ { "identifier": "box_ops", "path": "util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "NestedTensor", "path": "util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n if mask == 'auto':\n self.mask = torch.zeros_like(tensors).to(tensors.device)\n if self.mask.dim() == 3:\n self.mask = self.mask.sum(0).to(bool)\n elif self.mask.dim() == 4:\n self.mask = self.mask.sum(1).to(bool)\n else:\n raise ValueError(\"tensors dim must be 3 or 4 but {}({})\".format(self.tensors.dim(), self.tensors.shape))\n\n def imgsize(self):\n res = []\n for i in range(self.tensors.shape[0]):\n mask = self.mask[i]\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n res.append(torch.Tensor([maxH, maxW]))\n return res\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def to_img_list_single(self, tensor, mask):\n assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n img = tensor[:, :maxH, :maxW]\n return img\n\n def to_img_list(self):\n \"\"\"remove the padding and convert to img list\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.tensors.dim() == 3:\n return self.to_img_list_single(self.tensors, self.mask)\n else:\n res = []\n for i in range(self.tensors.shape[0]):\n tensor_i = self.tensors[i]\n mask_i = self.mask[i]\n res.append(self.to_img_list_single(tensor_i, mask_i))\n return res\n\n @property\n def device(self):\n return self.tensors.device\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n @property\n def shape(self):\n return {\n 'tensors.shape': self.tensors.shape,\n 'mask.shape': self.mask.shape\n }" }, { "identifier": "nested_tensor_from_tensor_list", "path": "util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)" }, { "identifier": "accuracy", "path": "util/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the 
precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_world_size", "path": "util/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "interpolate", "path": "util/misc.py", "snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if __torchvision_need_compat_flag < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "is_dist_avail_and_initialized", "path": "util/misc.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "inverse_sigmoid", "path": "util/misc.py", "snippet": "def inverse_sigmoid(x, eps=1e-3):\n if x.shape[-1] == 4:\n x = torch.cat(((x[..., :2] + 1) / 3, x[..., 2:] / 2), dim=-1)\n elif x.shape[-1] == 2:\n x = (x + 1) / 3\n else:\n raise ValueError\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)" }, { "identifier": "scale_sigmoid", "path": "util/misc.py", "snippet": "def scale_sigmoid(x, eps=1e-3):\n if x.shape[-1] == 4:\n x = torch.cat((3 * (x[..., :2]) - 1, x[..., 2:] * 2), dim=-1)\n elif x.shape[-1] == 2:\n x = 3 * x - 1\n else:\n raise ValueError\n return x" }, { "identifier": "build_backbone", "path": "models/dino/backbone.py", "snippet": "def build_backbone(args):\n \"\"\"\n Useful args:\n - backbone: backbone name\n - lr_backbone: \n - dilation\n - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]\n - backbone_freeze_keywords: \n - use_checkpoint: for swin only for now\n\n \"\"\"\n position_embedding = build_position_encoding(args)\n train_backbone = args.lr_backbone > 0\n if not train_backbone:\n raise ValueError(\"Please set lr_backbone > 0\")\n return_interm_indices = args.return_interm_indices\n assert return_interm_indices in [[0,1,2,3], [1,2,3], [3]]\n backbone_freeze_keywords = args.backbone_freeze_keywords\n use_checkpoint = getattr(args, 'use_checkpoint', False)\n\n if args.backbone in ['resnet50', 'resnet101']:\n backbone = Backbone(args.backbone, train_backbone, args.dilation, \n return_interm_indices, \n batch_norm=FrozenBatchNorm2d)\n bb_num_channels = backbone.num_channels\n elif args.backbone in ['swin_T_224_1k', 'swin_B_224_22k', 'swin_B_384_22k', 'swin_L_224_22k', 'swin_L_384_22k']:\n pretrain_img_size = int(args.backbone.split('_')[-2])\n backbone = build_swin_transformer(args.backbone, \\\n 
pretrain_img_size=pretrain_img_size, \\\n out_indices=tuple(return_interm_indices), \\\n dilation=args.dilation, use_checkpoint=use_checkpoint)\n\n # freeze some layers\n if backbone_freeze_keywords is not None:\n for name, parameter in backbone.named_parameters():\n for keyword in backbone_freeze_keywords:\n if keyword in name:\n parameter.requires_grad_(False)\n break\n\n pretrained_dir = args.backbone_dir\n PTDICT = {\n 'swin_T_224_1k': 'swin_tiny_patch4_window7_224.pth',\n 'swin_B_384_22k': 'swin_base_patch4_window12_384.pth',\n 'swin_L_384_22k': 'swin_large_patch4_window12_384_22k.pth',\n }\n pretrainedpath = os.path.join(pretrained_dir, PTDICT[args.backbone])\n checkpoint = torch.load(pretrainedpath, map_location='cpu')['model']\n from collections import OrderedDict\n def key_select_function(keyname):\n if 'head' in keyname:\n return False\n if args.dilation and 'layers.3' in keyname:\n return False\n return True\n _tmp_st = OrderedDict({k:v for k, v in clean_state_dict(checkpoint).items() if key_select_function(k)})\n _tmp_st_output = backbone.load_state_dict(_tmp_st, strict=False)\n print(str(_tmp_st_output))\n bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]\n elif args.backbone in ['convnext_xlarge_22k']:\n backbone = build_convnext(modelname=args.backbone, pretrained=True, out_indices=tuple(return_interm_indices),backbone_dir=args.backbone_dir)\n bb_num_channels = backbone.dims[4 - len(return_interm_indices):]\n else:\n raise NotImplementedError(\"Unknown backbone {}\".format(args.backbone))\n \n\n assert len(bb_num_channels) == len(return_interm_indices), f\"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}\"\n\n\n model = Joiner(backbone, position_embedding)\n model.num_channels = bb_num_channels \n assert isinstance(bb_num_channels, List), \"bb_num_channels is expected to be a List but {}\".format(type(bb_num_channels))\n return model" }, { "identifier": "build_matcher", "path": "models/dino/matcher.py", "snippet": "def build_matcher(args):\n assert args.matcher_type in ['HungarianMatcher', 'SimpleMinsumMatcher'], \"Unknown args.matcher_type: {}\".format(args.matcher_type)\n if args.matcher_type == 'HungarianMatcher':\n return HungarianMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n )\n elif args.matcher_type == 'SimpleMinsumMatcher':\n return SimpleMinsumMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n ) \n else:\n raise NotImplementedError(\"Unknown args.matcher_type: {}\".format(args.matcher_type))" }, { "identifier": "DETRsegm", "path": "models/dino/segmentation.py", "snippet": "class DETRsegm(nn.Module):\n def __init__(self, detr, freeze_detr=False):\n super().__init__()\n self.detr = detr\n\n if freeze_detr:\n for p in self.parameters():\n p.requires_grad_(False)\n\n hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead\n self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0)\n self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)\n\n def forward(self, samples: NestedTensor):\n if isinstance(samples, (list, torch.Tensor)):\n samples = nested_tensor_from_tensor_list(samples)\n features, pos = self.detr.backbone(samples)\n\n bs = features[-1].tensors.shape[0]\n\n src, mask = features[-1].decompose()\n assert mask is not None\n src_proj = 
self.detr.input_proj(src)\n hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])\n\n outputs_class = self.detr.class_embed(hs)\n outputs_coord = self.detr.bbox_embed(hs).sigmoid()\n out = {\"pred_logits\": outputs_class[-1], \"pred_boxes\": outputs_coord[-1]}\n if self.detr.aux_loss:\n out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord)\n\n # FIXME h_boxes takes the last one computed, keep this in mind\n bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)\n\n seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])\n outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])\n\n out[\"pred_masks\"] = outputs_seg_masks\n return out" }, { "identifier": "PostProcessPanoptic", "path": "models/dino/segmentation.py", "snippet": "class PostProcessPanoptic(nn.Module):\n \"\"\"This class converts the output of the model to the final panoptic result, in the format expected by the\n coco panoptic API \"\"\"\n\n def __init__(self, is_thing_map, threshold=0.85):\n \"\"\"\n Parameters:\n is_thing_map: This is a whose keys are the class ids, and the values a boolean indicating whether\n the class is a thing (True) or a stuff (False) class\n threshold: confidence threshold: segments with confidence lower than this will be deleted\n \"\"\"\n super().__init__()\n self.threshold = threshold\n self.is_thing_map = is_thing_map\n\n def forward(self, outputs, processed_sizes, target_sizes=None):\n \"\"\" This function computes the panoptic prediction from the model's predictions.\n Parameters:\n outputs: This is a dict coming directly from the model. See the model doc for the content.\n processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the\n model, ie the size after data augmentation but before batching.\n target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size\n of each prediction. 
If left to None, it will default to the processed_sizes\n \"\"\"\n if target_sizes is None:\n target_sizes = processed_sizes\n assert len(processed_sizes) == len(target_sizes)\n out_logits, raw_masks, raw_boxes = outputs[\"pred_logits\"], outputs[\"pred_masks\"], outputs[\"pred_boxes\"]\n assert len(out_logits) == len(raw_masks) == len(target_sizes)\n preds = []\n\n def to_tuple(tup):\n if isinstance(tup, tuple):\n return tup\n return tuple(tup.cpu().tolist())\n\n for cur_logits, cur_masks, cur_boxes, size, target_size in zip(\n out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes\n ):\n # we filter empty queries and detection below threshold\n scores, labels = cur_logits.softmax(-1).max(-1)\n keep = labels.ne(outputs[\"pred_logits\"].shape[-1] - 1) & (scores > self.threshold)\n cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)\n cur_scores = cur_scores[keep]\n cur_classes = cur_classes[keep]\n cur_masks = cur_masks[keep]\n cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode=\"bilinear\").squeeze(1)\n cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])\n\n h, w = cur_masks.shape[-2:]\n assert len(cur_boxes) == len(cur_classes)\n\n # It may be that we have several predicted masks for the same stuff class.\n # In the following, we track the list of masks ids for each stuff class (they are merged later on)\n cur_masks = cur_masks.flatten(1)\n stuff_equiv_classes = defaultdict(lambda: [])\n for k, label in enumerate(cur_classes):\n if not self.is_thing_map[label.item()]:\n stuff_equiv_classes[label.item()].append(k)\n\n def get_ids_area(masks, scores, dedup=False):\n # This helper function creates the final panoptic segmentation image\n # It also returns the area of the masks that appears on the image\n\n m_id = masks.transpose(0, 1).softmax(-1)\n\n if m_id.shape[-1] == 0:\n # We didn't detect any mask :(\n m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)\n else:\n m_id = m_id.argmax(-1).view(h, w)\n\n if dedup:\n # Merge the masks corresponding to the same stuff class\n for equiv in stuff_equiv_classes.values():\n if len(equiv) > 1:\n for eq_id in equiv:\n m_id.masked_fill_(m_id.eq(eq_id), equiv[0])\n\n final_h, final_w = to_tuple(target_size)\n\n seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))\n seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)\n\n np_seg_img = (\n torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()\n )\n m_id = torch.from_numpy(rgb2id(np_seg_img))\n\n area = []\n for i in range(len(scores)):\n area.append(m_id.eq(i).sum().item())\n return area, seg_img\n\n area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)\n if cur_classes.numel() > 0:\n # We know filter empty masks as long as we find some\n while True:\n filtered_small = torch.as_tensor(\n [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device\n )\n if filtered_small.any().item():\n cur_scores = cur_scores[~filtered_small]\n cur_classes = cur_classes[~filtered_small]\n cur_masks = cur_masks[~filtered_small]\n area, seg_img = get_ids_area(cur_masks, cur_scores)\n else:\n break\n\n else:\n cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)\n\n segments_info = []\n for i, a in enumerate(area):\n cat = cur_classes[i].item()\n segments_info.append({\"id\": i, \"isthing\": self.is_thing_map[cat], \"category_id\": cat, \"area\": a})\n del cur_classes\n\n with io.BytesIO() as out:\n seg_img.save(out, format=\"PNG\")\n 
predictions = {\"png_string\": out.getvalue(), \"segments_info\": segments_info}\n preds.append(predictions)\n return preds" }, { "identifier": "PostProcessSegm", "path": "models/dino/segmentation.py", "snippet": "class PostProcessSegm(nn.Module):\n def __init__(self, threshold=0.5):\n super().__init__()\n self.threshold = threshold\n\n @torch.no_grad()\n def forward(self, results, outputs, orig_target_sizes, max_target_sizes):\n assert len(orig_target_sizes) == len(max_target_sizes)\n max_h, max_w = max_target_sizes.max(0)[0].tolist()\n outputs_masks = outputs[\"pred_masks\"].squeeze(2)\n outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode=\"bilinear\", align_corners=False)\n outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()\n\n for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):\n img_h, img_w = t[0], t[1]\n results[i][\"masks\"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)\n results[i][\"masks\"] = F.interpolate(\n results[i][\"masks\"].float(), size=tuple(tt.tolist()), mode=\"nearest\"\n ).byte()\n\n return results" }, { "identifier": "dice_loss", "path": "models/dino/segmentation.py", "snippet": "def dice_loss(inputs, targets, num_boxes):\n \"\"\"\n Compute the DICE loss, similar to generalized IOU for masks\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n \"\"\"\n inputs = inputs.sigmoid()\n inputs = inputs.flatten(1)\n numerator = 2 * (inputs * targets).sum(1)\n denominator = inputs.sum(-1) + targets.sum(-1)\n loss = 1 - (numerator + 1) / (denominator + 1)\n return loss.sum() / num_boxes" }, { "identifier": "build_deformable_transformer", "path": "models/dino/deformable_transformer.py", "snippet": "def build_deformable_transformer(args):\n decoder_query_perturber = None\n if args.decoder_layer_noise:\n from .utils import RandomBoxPerturber\n decoder_query_perturber=RandomBoxPerturber(\n x_noise_scale=args.dln_xy_noise, y_noise_scale=args.dln_xy_noise, \n w_noise_scale=args.dln_hw_noise, h_noise_scale=args.dln_hw_noise)\n\n use_detached_boxes_dec_out = False\n try:\n use_detached_boxes_dec_out = args.use_detached_boxes_dec_out\n except:\n use_detached_boxes_dec_out =False\n\n return DeformableTransformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n num_queries=args.num_queries,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_unicoder_layers=args.unic_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n query_dim=args.query_dim,\n activation=args.transformer_activation,\n num_patterns=args.num_patterns,\n modulate_hw_attn=True,\n\n deformable_encoder=True,\n deformable_decoder=True,\n num_feature_levels=args.num_feature_levels,\n enc_n_points=args.enc_n_points,\n dec_n_points=args.dec_n_points,\n use_deformable_box_attn=args.use_deformable_box_attn,\n box_attn_type=args.box_attn_type,\n\n learnable_tgt_init=True,\n decoder_query_perturber=decoder_query_perturber,\n\n add_channel_attention=args.add_channel_attention,\n add_pos_value=args.add_pos_value,\n random_refpoints_xy=args.random_refpoints_xy,\n\n # two stage\n two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']\n two_stage_pat_embed=args.two_stage_pat_embed,\n 
two_stage_add_query_num=args.two_stage_add_query_num,\n two_stage_learn_wh=args.two_stage_learn_wh,\n two_stage_keep_all_tokens=args.two_stage_keep_all_tokens,\n dec_layer_number=args.dec_layer_number,\n rm_self_attn_layers=None,\n key_aware_type=None,\n layer_share_type=None,\n\n rm_detach=None,\n decoder_sa_type=args.decoder_sa_type,\n module_seq=args.decoder_module_seq,\n\n embed_init_tgt=args.embed_init_tgt,\n use_detached_boxes_dec_out=use_detached_boxes_dec_out\n )" }, { "identifier": "sigmoid_focal_loss", "path": "models/dino/utils.py", "snippet": "def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n\n return loss.mean(1).sum() / num_boxes" }, { "identifier": "MLP", "path": "models/dino/utils.py", "snippet": "class MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x" }, { "identifier": "MODULE_BUILD_FUNCS", "path": "models/registry.py", "snippet": "MODULE_BUILD_FUNCS = Registry('model build functions')" }, { "identifier": "prepare_for_cdn", "path": "models/dino/dn_components.py", "snippet": "def prepare_for_cdn(dn_args, training, num_queries, num_classes, hidden_dim, label_enc):\n \"\"\"\n A major difference of DINO from DN-DETR is that the author process pattern embedding pattern embedding in its detector\n forward function and use learnable tgt embedding, so we change this function a little bit.\n :param dn_args: targets, dn_number, label_noise_ratio, box_noise_scale\n :param training: if it is training or inference\n :param num_queries: number of queires\n :param num_classes: number of classes\n :param hidden_dim: transformer hidden dim\n :param label_enc: encode labels in dn\n :return:\n \"\"\"\n if training:\n targets, dn_number, label_noise_ratio, box_noise_scale = dn_args\n # positive and negative dn queries\n dn_number = dn_number * 2\n known = [(torch.ones_like(t['labels'])).cuda() for t in targets]\n batch_size = len(known)\n known_num = [sum(k) for k in known]\n if int(max(known_num)) == 0:\n dn_number = 1\n else:\n if dn_number >= 100:\n dn_number = dn_number // (int(max(known_num) * 2))\n elif dn_number < 1:\n dn_number = 1\n if dn_number == 0:\n dn_number = 1\n unmask_bbox = unmask_label = 
torch.cat(known)\n labels = torch.cat([t['labels'] for t in targets])\n boxes = torch.cat([t['boxes'] for t in targets])\n batch_idx = torch.cat([torch.full_like(t['labels'].long(), i) for i, t in enumerate(targets)])\n\n known_indice = torch.nonzero(unmask_label + unmask_bbox)\n known_indice = known_indice.view(-1)\n\n known_indice = known_indice.repeat(2 * dn_number, 1).view(-1)\n known_labels = labels.repeat(2 * dn_number, 1).view(-1)\n known_bid = batch_idx.repeat(2 * dn_number, 1).view(-1)\n known_bboxs = boxes.repeat(2 * dn_number, 1)\n known_labels_expaned = known_labels.clone()\n known_bbox_expand = known_bboxs.clone()\n\n if label_noise_ratio > 0:\n p = torch.rand_like(known_labels_expaned.float())\n chosen_indice = torch.nonzero(p < (label_noise_ratio * 0.5)).view(-1) # half of bbox prob\n new_label = torch.randint_like(chosen_indice, 0, num_classes) # randomly put a new one here\n known_labels_expaned.scatter_(0, chosen_indice, new_label)\n single_pad = int(max(known_num))\n\n pad_size = int(single_pad * 2 * dn_number)\n positive_idx = torch.tensor(range(len(boxes))).long().cuda().unsqueeze(0).repeat(dn_number, 1)\n positive_idx += (torch.tensor(range(dn_number)) * len(boxes) * 2).long().cuda().unsqueeze(1)\n positive_idx = positive_idx.flatten()\n negative_idx = positive_idx + len(boxes)\n if box_noise_scale > 0:\n known_bbox_ = torch.zeros_like(known_bboxs)\n known_bbox_[:, :2] = known_bboxs[:, :2] - known_bboxs[:, 2:] / 2\n known_bbox_[:, 2:] = known_bboxs[:, :2] + known_bboxs[:, 2:] / 2\n\n diff = torch.zeros_like(known_bboxs)\n diff[:, :2] = known_bboxs[:, 2:] / 2\n diff[:, 2:] = known_bboxs[:, 2:] / 2\n\n rand_sign = torch.randint_like(known_bboxs, low=0, high=2, dtype=torch.float32) * 2.0 - 1.0\n rand_part = torch.rand_like(known_bboxs)\n rand_part[negative_idx] += 1.0\n rand_part *= rand_sign\n known_bbox_ = known_bbox_ + torch.mul(rand_part,\n diff).cuda() * box_noise_scale\n # known_bbox_ = known_bbox_.clamp(min=0.0, max=1.0)\n known_bbox_expand[:, :2] = (known_bbox_[:, :2] + known_bbox_[:, 2:]) / 2\n known_bbox_expand[:, 2:] = known_bbox_[:, 2:] - known_bbox_[:, :2]\n\n m = known_labels_expaned.long().to('cuda')\n input_label_embed = label_enc(m)\n input_bbox_embed = inverse_sigmoid(known_bbox_expand)\n\n padding_label = torch.zeros(pad_size, hidden_dim).cuda()\n padding_bbox = torch.zeros(pad_size, 4).cuda()\n\n input_query_label = padding_label.repeat(batch_size, 1, 1)\n input_query_bbox = padding_bbox.repeat(batch_size, 1, 1)\n\n map_known_indice = torch.tensor([]).to('cuda')\n if len(known_num):\n map_known_indice = torch.cat([torch.tensor(range(num)) for num in known_num]) # [1,2, 1,2,3]\n map_known_indice = torch.cat([map_known_indice + single_pad * i for i in range(2 * dn_number)]).long()\n if len(known_bid):\n input_query_label[(known_bid.long(), map_known_indice)] = input_label_embed\n input_query_bbox[(known_bid.long(), map_known_indice)] = input_bbox_embed\n\n tgt_size = pad_size + num_queries\n attn_mask = torch.ones(tgt_size, tgt_size).to('cuda') < 0\n # match query cannot see the reconstruct\n attn_mask[pad_size:, :pad_size] = True\n # reconstruct cannot see each other\n for i in range(dn_number):\n if i == 0:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True\n if i == dn_number - 1:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * i * 2] = True\n else:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True\n attn_mask[single_pad * 2 * 
i:single_pad * 2 * (i + 1), :single_pad * 2 * i] = True\n\n dn_meta = {\n 'pad_size': pad_size,\n 'num_dn_group': dn_number,\n }\n else:\n\n input_query_label = None\n input_query_bbox = None\n attn_mask = None\n dn_meta = None\n\n return input_query_label, input_query_bbox, attn_mask, dn_meta" }, { "identifier": "dn_post_process", "path": "models/dino/dn_components.py", "snippet": "def dn_post_process(outputs_class, outputs_coord, dn_meta, aux_loss, _set_aux_loss):\n \"\"\"\n post process of dn after output from the transformer\n put the dn part in the dn_meta\n \"\"\"\n if dn_meta and dn_meta['pad_size'] > 0:\n output_known_class = outputs_class[:, :, :dn_meta['pad_size'], :]\n output_known_coord = outputs_coord[:, :, :dn_meta['pad_size'], :]\n outputs_class = outputs_class[:, :, dn_meta['pad_size']:, :]\n outputs_coord = outputs_coord[:, :, dn_meta['pad_size']:, :]\n out = {'pred_logits': output_known_class[-1], 'pred_boxes': output_known_coord[-1]}\n if aux_loss:\n out['aux_outputs'] = _set_aux_loss(output_known_class, output_known_coord)\n dn_meta['output_known_lbs_bboxes'] = out\n return outputs_class, outputs_coord" } ]
import copy import math import torch import torch.nn.functional as F from typing import List from torch import nn from torchvision.ops.boxes import nms from util import box_ops from util.misc import (NestedTensor, nested_tensor_from_tensor_list, accuracy, get_world_size, interpolate, is_dist_avail_and_initialized, inverse_sigmoid, scale_sigmoid) from .backbone import build_backbone from .matcher import build_matcher from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, dice_loss) from .deformable_transformer import build_deformable_transformer from .utils import sigmoid_focal_loss, MLP from ..registry import MODULE_BUILD_FUNCS from .dn_components import prepare_for_cdn,dn_post_process
11,745
# for encoder output if hs_enc is not None: # prepare intermediate outputs interm_coord = ref_enc[-1] interm_class = self.transformer.enc_out_class_embed(hs_enc[-1]) out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} # prepare enc outputs # import ipdb; ipdb.set_trace() if hs_enc.shape[0] > 1: enc_outputs_coord = [] enc_outputs_class = [] for layer_id, (layer_box_embed, layer_class_embed, layer_hs_enc, layer_ref_enc) in enumerate(zip(self.enc_bbox_embed, self.enc_class_embed, hs_enc[:-1], ref_enc[:-1])): layer_enc_delta_unsig = layer_box_embed(layer_hs_enc) layer_enc_outputs_coord_unsig = layer_enc_delta_unsig + inverse_sigmoid(layer_ref_enc) layer_enc_outputs_coord = scale_sigmoid(layer_enc_outputs_coord_unsig.sigmoid()) layer_enc_outputs_class = layer_class_embed(layer_hs_enc) enc_outputs_coord.append(layer_enc_outputs_coord) enc_outputs_class.append(layer_enc_outputs_class) # enc_delta_unsig = self.enc_bbox_embed(hs_enc[:-1]) # enc_outputs_unsig = enc_delta_unsig + ref_enc[:-1] # enc_outputs_coord = enc_outputs_unsig.sigmoid() # enc_outputs_class = self.enc_class_embed(hs_enc[:-1]) out['enc_outputs'] = [ {'pred_logits': a, 'pred_boxes': b} for a, b in zip(enc_outputs_class, enc_outputs_coord) ] out['dn_meta'] = dn_meta return out @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{'pred_logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] class SetCriterion(nn.Module): """ This class computes the loss for Conditional DETR. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) """ def __init__(self, num_classes, matcher, weight_dict, focal_alpha, losses): """ Create the criterion. Parameters: num_classes: number of object categories, omitting the special no-object category matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. losses: list of all the losses to be applied. See get_loss for list of available losses. 
focal_alpha: alpha in Focal Loss """ super().__init__() self.num_classes = num_classes self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.focal_alpha = focal_alpha def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (Binary focal loss) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] """ assert 'pred_logits' in outputs src_logits = outputs['pred_logits'] idx = self._get_src_permutation_idx(indices) target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2]+1], dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:,:,:-1] loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1] losses = {'loss_ce': loss_ce} if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes
# ------------------------------------------------------------------------ # DINO # Copyright (c) 2022 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class DINO(nn.Module): """ This is the Cross-Attention Detector module that performs object detection """ def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False, iter_update=False, query_dim=2, random_refpoints_xy=False, fix_refpoints_hw=-1, num_feature_levels=1, nheads=8, # two stage two_stage_type='no', # ['no', 'standard'] two_stage_add_query_num=0, dec_pred_class_embed_share=True, dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, decoder_sa_type = 'sa', num_patterns = 0, dn_number = 100, dn_box_noise_scale = 0.4, dn_label_noise_ratio = 0.5, dn_labelbook_size = 100, ): """ Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_classes: number of object classes num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
fix_refpoints_hw: -1(default): learn w and h for each box seperately >0 : given fixed number -2 : learn a shared w and h """ super().__init__() self.num_queries = num_queries self.transformer = transformer self.num_classes = num_classes self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim) # setting query dim self.query_dim = query_dim assert query_dim == 4 self.random_refpoints_xy = random_refpoints_xy self.fix_refpoints_hw = fix_refpoints_hw # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), )) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList([ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )]) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_class_embed_share = dec_pred_class_embed_share self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = nn.Linear(hidden_dim, num_classes) _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) # init the two embed layers prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) _class_embed.bias.data = torch.ones(self.num_classes) * bias_value nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)] if dec_pred_class_embed_share: class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] else: class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type self.two_stage_add_query_num = two_stage_add_query_num assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type) if two_stage_type != 'no': if two_stage_bbox_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None if self.two_stage_add_query_num > 0: self.init_ref_points(two_stage_add_query_num) self.decoder_sa_type = decoder_sa_type assert decoder_sa_type in ['sa', 'ca_label', 'ca_content'] # self.replace_sa_with_double_ca = replace_sa_with_double_ca if decoder_sa_type == 'ca_label': self.label_embedding = nn.Embedding(num_classes, hidden_dim) for layer in self.transformer.decoder.layers: layer.label_embedding = self.label_embedding else: for layer in self.transformer.decoder.layers: layer.label_embedding = None self.label_embedding = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): raise NotImplementedError def forward(self, samples: NestedTensor, targets:List=None): """ The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. 
It is a list of dictionnaries containing the two above keys for each decoder layer. """ if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) if self.dn_number > 0 or targets is not None: input_query_label, input_query_bbox, attn_mask, dn_meta =\ prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale), training=self.training,num_queries=self.num_queries,num_classes=self.num_classes, hidden_dim=self.hidden_dim,label_enc=self.label_enc) else: assert targets is None input_query_bbox = input_query_label = attn_mask = dn_meta = None hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(srcs, masks, input_query_bbox, poss,input_query_label,attn_mask) # In case num object=0 hs[0]+=self.label_enc.weight[0,0]*0.0 outputs_coord_list = [] for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)): layer_delta_unsig = layer_bbox_embed(layer_hs) layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig) layer_outputs_unsig = scale_sigmoid(layer_outputs_unsig.sigmoid()) outputs_coord_list.append(layer_outputs_unsig) outputs_coord_list = torch.stack(outputs_coord_list) # outputs_class = self.class_embed(hs) outputs_class = torch.stack([layer_cls_embed(layer_hs) for layer_cls_embed, layer_hs in zip(self.class_embed, hs)]) if self.dn_number > 0 and dn_meta is not None: outputs_class, outputs_coord_list = \ dn_post_process(outputs_class, outputs_coord_list, dn_meta,self.aux_loss,self._set_aux_loss) out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord_list[-1]} if self.aux_loss: out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list) # for encoder output if hs_enc is not None: # prepare intermediate outputs interm_coord = ref_enc[-1] interm_class = self.transformer.enc_out_class_embed(hs_enc[-1]) out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} # prepare enc outputs # import ipdb; ipdb.set_trace() if hs_enc.shape[0] > 1: enc_outputs_coord = [] enc_outputs_class = [] for layer_id, (layer_box_embed, layer_class_embed, layer_hs_enc, layer_ref_enc) in enumerate(zip(self.enc_bbox_embed, self.enc_class_embed, hs_enc[:-1], ref_enc[:-1])): layer_enc_delta_unsig = layer_box_embed(layer_hs_enc) layer_enc_outputs_coord_unsig = layer_enc_delta_unsig + inverse_sigmoid(layer_ref_enc) layer_enc_outputs_coord = scale_sigmoid(layer_enc_outputs_coord_unsig.sigmoid()) layer_enc_outputs_class = layer_class_embed(layer_hs_enc) enc_outputs_coord.append(layer_enc_outputs_coord) enc_outputs_class.append(layer_enc_outputs_class) # enc_delta_unsig = self.enc_bbox_embed(hs_enc[:-1]) # enc_outputs_unsig = enc_delta_unsig + ref_enc[:-1] # enc_outputs_coord = 
enc_outputs_unsig.sigmoid() # enc_outputs_class = self.enc_class_embed(hs_enc[:-1]) out['enc_outputs'] = [ {'pred_logits': a, 'pred_boxes': b} for a, b in zip(enc_outputs_class, enc_outputs_coord) ] out['dn_meta'] = dn_meta return out @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{'pred_logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] class SetCriterion(nn.Module): """ This class computes the loss for Conditional DETR. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) """ def __init__(self, num_classes, matcher, weight_dict, focal_alpha, losses): """ Create the criterion. Parameters: num_classes: number of object categories, omitting the special no-object category matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. losses: list of all the losses to be applied. See get_loss for list of available losses. focal_alpha: alpha in Focal Loss """ super().__init__() self.num_classes = num_classes self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.focal_alpha = focal_alpha def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (Binary focal loss) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] """ assert 'pred_logits' in outputs src_logits = outputs['pred_logits'] idx = self._get_src_permutation_idx(indices) target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2]+1], dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:,:,:-1] loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1] losses = {'loss_ce': loss_ce} if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. 
It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
0
2023-10-16 02:18:33+00:00
16k
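The DINO record above trains its classification head with the sigmoid focal loss quoted in its context (models/dino/utils.py), which down-weights easy examples and re-balances positives against negatives. A minimal runnable sketch under the same alpha/gamma conventions; the tensor shapes and toy inputs are assumptions for illustration, and only the loss shaping follows the quoted snippet.

    import torch
    import torch.nn.functional as F

    def sigmoid_focal_loss_sketch(inputs, targets, num_boxes, alpha=0.25, gamma=2.0):
        # Per-element BCE on the logits, scaled by (1 - p_t) ** gamma so that
        # confidently-correct predictions contribute little, then re-balanced
        # between positive and negative targets with alpha.
        prob = inputs.sigmoid()
        ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
        p_t = prob * targets + (1 - prob) * (1 - targets)
        loss = ce_loss * ((1 - p_t) ** gamma)
        if alpha >= 0:
            alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
            loss = alpha_t * loss
        return loss.mean(1).sum() / num_boxes

    # Toy usage: logits of shape [batch=2, queries=5, classes=3] against a
    # one-hot target tensor with a single positive box.
    logits = torch.randn(2, 5, 3)
    targets = torch.zeros_like(logits)
    targets[0, 0, 1] = 1.0
    print(sigmoid_focal_loss_sketch(logits, targets, num_boxes=1))

In SetCriterion.loss_labels above, the same loss is applied to one-hot targets built from the matched ground-truth labels and then multiplied by the number of queries before being reported as loss_ce.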
YuroFR/freqtrade-modded-crypto-trading-bot
tests/strategy/test_strategy_helpers.py
[ { "identifier": "DataProvider", "path": "freqtrade/data/dataprovider.py", "snippet": "class DataProvider:\n\n def __init__(\n self,\n config: Config,\n exchange: Optional[Exchange],\n pairlists=None,\n rpc: Optional[RPCManager] = None\n ) -> None:\n self._config = config\n self._exchange = exchange\n self._pairlists = pairlists\n self.__rpc = rpc\n self.__cached_pairs: Dict[PairWithTimeframe, Tuple[DataFrame, datetime]] = {}\n self.__slice_index: Optional[int] = None\n self.__slice_date: Optional[datetime] = None\n\n self.__cached_pairs_backtesting: Dict[PairWithTimeframe, DataFrame] = {}\n self.__producer_pairs_df: Dict[str,\n Dict[PairWithTimeframe, Tuple[DataFrame, datetime]]] = {}\n self.__producer_pairs: Dict[str, List[str]] = {}\n self._msg_queue: deque = deque()\n\n self._default_candle_type = self._config.get('candle_type_def', CandleType.SPOT)\n self._default_timeframe = self._config.get('timeframe', '1h')\n\n self.__msg_cache = PeriodicCache(\n maxsize=1000, ttl=timeframe_to_seconds(self._default_timeframe))\n\n self.producers = self._config.get('external_message_consumer', {}).get('producers', [])\n self.external_data_enabled = len(self.producers) > 0\n\n def _set_dataframe_max_index(self, limit_index: int):\n \"\"\"\n Limit analyzed dataframe to max specified index.\n Only relevant in backtesting.\n :param limit_index: dataframe index.\n \"\"\"\n self.__slice_index = limit_index\n\n def _set_dataframe_max_date(self, limit_date: datetime):\n \"\"\"\n Limit infomrative dataframe to max specified index.\n Only relevant in backtesting.\n :param limit_date: \"current date\"\n \"\"\"\n self.__slice_date = limit_date\n\n def _set_cached_df(\n self,\n pair: str,\n timeframe: str,\n dataframe: DataFrame,\n candle_type: CandleType\n ) -> None:\n \"\"\"\n Store cached Dataframe.\n Using private method as this should never be used by a user\n (but the class is exposed via `self.dp` to the strategy)\n :param pair: pair to get the data for\n :param timeframe: Timeframe to get data for\n :param dataframe: analyzed dataframe\n :param candle_type: Any of the enum CandleType (must match trading mode!)\n \"\"\"\n pair_key = (pair, timeframe, candle_type)\n self.__cached_pairs[pair_key] = (\n dataframe, datetime.now(timezone.utc))\n\n # For multiple producers we will want to merge the pairlists instead of overwriting\n def _set_producer_pairs(self, pairlist: List[str], producer_name: str = \"default\"):\n \"\"\"\n Set the pairs received to later be used.\n\n :param pairlist: List of pairs\n \"\"\"\n self.__producer_pairs[producer_name] = pairlist\n\n def get_producer_pairs(self, producer_name: str = \"default\") -> List[str]:\n \"\"\"\n Get the pairs cached from the producer\n\n :returns: List of pairs\n \"\"\"\n return self.__producer_pairs.get(producer_name, []).copy()\n\n def _emit_df(\n self,\n pair_key: PairWithTimeframe,\n dataframe: DataFrame,\n new_candle: bool\n ) -> None:\n \"\"\"\n Send this dataframe as an ANALYZED_DF message to RPC\n\n :param pair_key: PairWithTimeframe tuple\n :param dataframe: Dataframe to emit\n :param new_candle: This is a new candle\n \"\"\"\n if self.__rpc:\n msg: RPCAnalyzedDFMsg = {\n 'type': RPCMessageType.ANALYZED_DF,\n 'data': {\n 'key': pair_key,\n 'df': dataframe.tail(1),\n 'la': datetime.now(timezone.utc)\n }\n }\n self.__rpc.send_msg(msg)\n if new_candle:\n self.__rpc.send_msg({\n 'type': RPCMessageType.NEW_CANDLE,\n 'data': pair_key,\n })\n\n def _replace_external_df(\n self,\n pair: str,\n dataframe: DataFrame,\n last_analyzed: datetime,\n 
timeframe: str,\n candle_type: CandleType,\n producer_name: str = \"default\"\n ) -> None:\n \"\"\"\n Add the pair data to this class from an external source.\n\n :param pair: pair to get the data for\n :param timeframe: Timeframe to get data for\n :param candle_type: Any of the enum CandleType (must match trading mode!)\n \"\"\"\n pair_key = (pair, timeframe, candle_type)\n\n if producer_name not in self.__producer_pairs_df:\n self.__producer_pairs_df[producer_name] = {}\n\n _last_analyzed = datetime.now(timezone.utc) if not last_analyzed else last_analyzed\n\n self.__producer_pairs_df[producer_name][pair_key] = (dataframe, _last_analyzed)\n logger.debug(f\"External DataFrame for {pair_key} from {producer_name} added.\")\n\n def _add_external_df(\n self,\n pair: str,\n dataframe: DataFrame,\n last_analyzed: datetime,\n timeframe: str,\n candle_type: CandleType,\n producer_name: str = \"default\"\n ) -> Tuple[bool, int]:\n \"\"\"\n Append a candle to the existing external dataframe. The incoming dataframe\n must have at least 1 candle.\n\n :param pair: pair to get the data for\n :param timeframe: Timeframe to get data for\n :param candle_type: Any of the enum CandleType (must match trading mode!)\n :returns: False if the candle could not be appended, or the int number of missing candles.\n \"\"\"\n pair_key = (pair, timeframe, candle_type)\n\n if dataframe.empty:\n # The incoming dataframe must have at least 1 candle\n return (False, 0)\n\n if len(dataframe) >= FULL_DATAFRAME_THRESHOLD:\n # This is likely a full dataframe\n # Add the dataframe to the dataprovider\n self._replace_external_df(\n pair,\n dataframe,\n last_analyzed=last_analyzed,\n timeframe=timeframe,\n candle_type=candle_type,\n producer_name=producer_name\n )\n return (True, 0)\n\n if (producer_name not in self.__producer_pairs_df\n or pair_key not in self.__producer_pairs_df[producer_name]):\n # We don't have data from this producer yet,\n # or we don't have data for this pair_key\n # return False and 1000 for the full df\n return (False, 1000)\n\n existing_df, _ = self.__producer_pairs_df[producer_name][pair_key]\n\n # CHECK FOR MISSING CANDLES\n # Convert the timeframe to a timedelta for pandas\n timeframe_delta: Timedelta = to_timedelta(timeframe)\n local_last: Timestamp = existing_df.iloc[-1]['date'] # We want the last date from our copy\n # We want the first date from the incoming\n incoming_first: Timestamp = dataframe.iloc[0]['date']\n\n # Remove existing candles that are newer than the incoming first candle\n existing_df1 = existing_df[existing_df['date'] < incoming_first]\n\n candle_difference = (incoming_first - local_last) / timeframe_delta\n\n # If the difference divided by the timeframe is 1, then this\n # is the candle we want and the incoming data isn't missing any.\n # If the candle_difference is more than 1, that means\n # we missed some candles between our data and the incoming\n # so return False and candle_difference.\n if candle_difference > 1:\n return (False, int(candle_difference))\n if existing_df1.empty:\n appended_df = dataframe\n else:\n appended_df = append_candles_to_dataframe(existing_df1, dataframe)\n\n # Everything is good, we appended\n self._replace_external_df(\n pair,\n appended_df,\n last_analyzed=last_analyzed,\n timeframe=timeframe,\n candle_type=candle_type,\n producer_name=producer_name\n )\n return (True, 0)\n\n def get_producer_df(\n self,\n pair: str,\n timeframe: Optional[str] = None,\n candle_type: Optional[CandleType] = None,\n producer_name: str = \"default\"\n ) -> 
Tuple[DataFrame, datetime]:\n \"\"\"\n Get the pair data from producers.\n\n :param pair: pair to get the data for\n :param timeframe: Timeframe to get data for\n :param candle_type: Any of the enum CandleType (must match trading mode!)\n :returns: Tuple of the DataFrame and last analyzed timestamp\n \"\"\"\n _timeframe = self._default_timeframe if not timeframe else timeframe\n _candle_type = self._default_candle_type if not candle_type else candle_type\n\n pair_key = (pair, _timeframe, _candle_type)\n\n # If we have no data from this Producer yet\n if producer_name not in self.__producer_pairs_df:\n # We don't have this data yet, return empty DataFrame and datetime (01-01-1970)\n return (DataFrame(), datetime.fromtimestamp(0, tz=timezone.utc))\n\n # If we do have data from that Producer, but no data on this pair_key\n if pair_key not in self.__producer_pairs_df[producer_name]:\n # We don't have this data yet, return empty DataFrame and datetime (01-01-1970)\n return (DataFrame(), datetime.fromtimestamp(0, tz=timezone.utc))\n\n # We have it, return this data\n df, la = self.__producer_pairs_df[producer_name][pair_key]\n return (df.copy(), la)\n\n def add_pairlisthandler(self, pairlists) -> None:\n \"\"\"\n Allow adding pairlisthandler after initialization\n \"\"\"\n self._pairlists = pairlists\n\n def historic_ohlcv(\n self,\n pair: str,\n timeframe: str,\n candle_type: str = ''\n ) -> DataFrame:\n \"\"\"\n Get stored historical candle (OHLCV) data\n :param pair: pair to get the data for\n :param timeframe: timeframe to get data for\n :param candle_type: '', mark, index, premiumIndex, or funding_rate\n \"\"\"\n _candle_type = CandleType.from_string(\n candle_type) if candle_type != '' else self._config['candle_type_def']\n saved_pair: PairWithTimeframe = (pair, str(timeframe), _candle_type)\n if saved_pair not in self.__cached_pairs_backtesting:\n timerange = TimeRange.parse_timerange(None if self._config.get(\n 'timerange') is None else str(self._config.get('timerange')))\n\n # It is not necessary to add the training candles, as they\n # were already added at the beginning of the backtest.\n startup_candles = self.get_required_startup(str(timeframe), False)\n tf_seconds = timeframe_to_seconds(str(timeframe))\n timerange.subtract_start(tf_seconds * startup_candles)\n self.__cached_pairs_backtesting[saved_pair] = load_pair_history(\n pair=pair,\n timeframe=timeframe,\n datadir=self._config['datadir'],\n timerange=timerange,\n data_format=self._config['dataformat_ohlcv'],\n candle_type=_candle_type,\n\n )\n return self.__cached_pairs_backtesting[saved_pair].copy()\n\n def get_required_startup(self, timeframe: str, add_train_candles: bool = True) -> int:\n freqai_config = self._config.get('freqai', {})\n if not freqai_config.get('enabled', False):\n return self._config.get('startup_candle_count', 0)\n else:\n startup_candles = self._config.get('startup_candle_count', 0)\n indicator_periods = freqai_config['feature_parameters']['indicator_periods_candles']\n # make sure the startupcandles is at least the set maximum indicator periods\n self._config['startup_candle_count'] = max(startup_candles, max(indicator_periods))\n tf_seconds = timeframe_to_seconds(timeframe)\n train_candles = 0\n if add_train_candles:\n train_candles = freqai_config['train_period_days'] * 86400 / tf_seconds\n total_candles = int(self._config['startup_candle_count'] + train_candles)\n logger.info(f'Increasing startup_candle_count for freqai to {total_candles}')\n return total_candles\n\n def get_pair_dataframe(\n self,\n 
pair: str,\n timeframe: Optional[str] = None,\n candle_type: str = ''\n ) -> DataFrame:\n \"\"\"\n Return pair candle (OHLCV) data, either live or cached historical -- depending\n on the runmode.\n Only combinations in the pairlist or which have been specified as informative pairs\n will be available.\n :param pair: pair to get the data for\n :param timeframe: timeframe to get data for\n :return: Dataframe for this pair\n :param candle_type: '', mark, index, premiumIndex, or funding_rate\n \"\"\"\n if self.runmode in (RunMode.DRY_RUN, RunMode.LIVE):\n # Get live OHLCV data.\n data = self.ohlcv(pair=pair, timeframe=timeframe, candle_type=candle_type)\n else:\n # Get historical OHLCV data (cached on disk).\n timeframe = timeframe or self._config['timeframe']\n data = self.historic_ohlcv(pair=pair, timeframe=timeframe, candle_type=candle_type)\n # Cut date to timeframe-specific date.\n # This is necessary to prevent lookahead bias in callbacks through informative pairs.\n if self.__slice_date:\n cutoff_date = timeframe_to_prev_date(timeframe, self.__slice_date)\n data = data.loc[data['date'] < cutoff_date]\n if len(data) == 0:\n logger.warning(f\"No data found for ({pair}, {timeframe}, {candle_type}).\")\n return data\n\n def get_analyzed_dataframe(self, pair: str, timeframe: str) -> Tuple[DataFrame, datetime]:\n \"\"\"\n Retrieve the analyzed dataframe. Returns the full dataframe in trade mode (live / dry),\n and the last 1000 candles (up to the time evaluated at this moment) in all other modes.\n :param pair: pair to get the data for\n :param timeframe: timeframe to get data for\n :return: Tuple of (Analyzed Dataframe, lastrefreshed) for the requested pair / timeframe\n combination.\n Returns empty dataframe and Epoch 0 (1970-01-01) if no dataframe was cached.\n \"\"\"\n pair_key = (pair, timeframe, self._config.get('candle_type_def', CandleType.SPOT))\n if pair_key in self.__cached_pairs:\n if self.runmode in (RunMode.DRY_RUN, RunMode.LIVE):\n df, date = self.__cached_pairs[pair_key]\n else:\n df, date = self.__cached_pairs[pair_key]\n if self.__slice_index is not None:\n max_index = self.__slice_index\n df = df.iloc[max(0, max_index - MAX_DATAFRAME_CANDLES):max_index]\n return df, date\n else:\n return (DataFrame(), datetime.fromtimestamp(0, tz=timezone.utc))\n\n @property\n def runmode(self) -> RunMode:\n \"\"\"\n Get runmode of the bot\n can be \"live\", \"dry-run\", \"backtest\", \"edgecli\", \"hyperopt\" or \"other\".\n \"\"\"\n return RunMode(self._config.get('runmode', RunMode.OTHER))\n\n def current_whitelist(self) -> List[str]:\n \"\"\"\n fetch latest available whitelist.\n\n Useful when you have a large whitelist and need to call each pair as an informative pair.\n As available pairs does not show whitelist until after informative pairs have been cached.\n :return: list of pairs in whitelist\n \"\"\"\n\n if self._pairlists:\n return self._pairlists.whitelist.copy()\n else:\n raise OperationalException(\"Dataprovider was not initialized with a pairlist provider.\")\n\n def clear_cache(self):\n \"\"\"\n Clear pair dataframe cache.\n \"\"\"\n self.__cached_pairs = {}\n # Don't reset backtesting pairs -\n # otherwise they're reloaded each time during hyperopt due to with analyze_per_epoch\n # self.__cached_pairs_backtesting = {}\n self.__slice_index = 0\n\n # Exchange functions\n\n def refresh(self,\n pairlist: ListPairsWithTimeframes,\n helping_pairs: Optional[ListPairsWithTimeframes] = None) -> None:\n \"\"\"\n Refresh data, called with each cycle\n \"\"\"\n if self._exchange is 
None:\n raise OperationalException(NO_EXCHANGE_EXCEPTION)\n final_pairs = (pairlist + helping_pairs) if helping_pairs else pairlist\n self._exchange.refresh_latest_ohlcv(final_pairs)\n\n @property\n def available_pairs(self) -> ListPairsWithTimeframes:\n \"\"\"\n Return a list of tuples containing (pair, timeframe) for which data is currently cached.\n Should be whitelist + open trades.\n \"\"\"\n if self._exchange is None:\n raise OperationalException(NO_EXCHANGE_EXCEPTION)\n return list(self._exchange._klines.keys())\n\n def ohlcv(\n self,\n pair: str,\n timeframe: Optional[str] = None,\n copy: bool = True,\n candle_type: str = ''\n ) -> DataFrame:\n \"\"\"\n Get candle (OHLCV) data for the given pair as DataFrame\n Please use the `available_pairs` method to verify which pairs are currently cached.\n :param pair: pair to get the data for\n :param timeframe: Timeframe to get data for\n :param candle_type: '', mark, index, premiumIndex, or funding_rate\n :param copy: copy dataframe before returning if True.\n Use False only for read-only operations (where the dataframe is not modified)\n \"\"\"\n if self._exchange is None:\n raise OperationalException(NO_EXCHANGE_EXCEPTION)\n if self.runmode in (RunMode.DRY_RUN, RunMode.LIVE):\n _candle_type = CandleType.from_string(\n candle_type) if candle_type != '' else self._config['candle_type_def']\n return self._exchange.klines(\n (pair, timeframe or self._config['timeframe'], _candle_type),\n copy=copy\n )\n else:\n return DataFrame()\n\n def market(self, pair: str) -> Optional[Dict[str, Any]]:\n \"\"\"\n Return market data for the pair\n :param pair: Pair to get the data for\n :return: Market data dict from ccxt or None if market info is not available for the pair\n \"\"\"\n if self._exchange is None:\n raise OperationalException(NO_EXCHANGE_EXCEPTION)\n return self._exchange.markets.get(pair)\n\n def ticker(self, pair: str):\n \"\"\"\n Return last ticker data from exchange\n :param pair: Pair to get the data for\n :return: Ticker dict from exchange or empty dict if ticker is not available for the pair\n \"\"\"\n if self._exchange is None:\n raise OperationalException(NO_EXCHANGE_EXCEPTION)\n try:\n return self._exchange.fetch_ticker(pair)\n except ExchangeError:\n return {}\n\n def orderbook(self, pair: str, maximum: int) -> OrderBook:\n \"\"\"\n Fetch latest l2 orderbook data\n Warning: Does a network request - so use with common sense.\n :param pair: pair to get the data for\n :param maximum: Maximum number of orderbook entries to query\n :return: dict including bids/asks with a total of `maximum` entries.\n \"\"\"\n if self._exchange is None:\n raise OperationalException(NO_EXCHANGE_EXCEPTION)\n return self._exchange.fetch_l2_order_book(pair, maximum)\n\n def send_msg(self, message: str, *, always_send: bool = False) -> None:\n \"\"\"\n Send custom RPC Notifications from your bot.\n Will not send any bot in modes other than Dry-run or Live.\n :param message: Message to be sent. 
Must be below 4096.\n :param always_send: If False, will send the message only once per candle, and surpress\n identical messages.\n Careful as this can end up spaming your chat.\n Defaults to False\n \"\"\"\n if self.runmode not in (RunMode.DRY_RUN, RunMode.LIVE):\n return\n\n if always_send or message not in self.__msg_cache:\n self._msg_queue.append(message)\n self.__msg_cache[message] = True" }, { "identifier": "CandleType", "path": "freqtrade/enums/candletype.py", "snippet": "class CandleType(str, Enum):\n \"\"\"Enum to distinguish candle types\"\"\"\n SPOT = \"spot\"\n FUTURES = \"futures\"\n MARK = \"mark\"\n INDEX = \"index\"\n PREMIUMINDEX = \"premiumIndex\"\n\n # TODO: Could take up less memory if these weren't a CandleType\n FUNDING_RATE = \"funding_rate\"\n # BORROW_RATE = \"borrow_rate\" # * unimplemented\n\n def __str__(self):\n return f\"{self.name.lower()}\"\n\n @staticmethod\n def from_string(value: str) -> 'CandleType':\n if not value:\n # Default to spot\n return CandleType.SPOT\n return CandleType(value)\n\n @staticmethod\n def get_default(trading_mode: str) -> 'CandleType':\n if trading_mode == 'futures':\n return CandleType.FUTURES\n return CandleType.SPOT" }, { "identifier": "StrategyResolver", "path": "freqtrade/resolvers/strategy_resolver.py", "snippet": "class StrategyResolver(IResolver):\n \"\"\"\n This class contains the logic to load custom strategy class\n \"\"\"\n object_type = IStrategy\n object_type_str = \"Strategy\"\n user_subdir = USERPATH_STRATEGIES\n initial_search_path = None\n extra_path = \"strategy_path\"\n\n @staticmethod\n def load_strategy(config: Optional[Config] = None) -> IStrategy:\n \"\"\"\n Load the custom class from config parameter\n :param config: configuration dictionary or None\n \"\"\"\n config = config or {}\n\n if not config.get('strategy'):\n raise OperationalException(\"No strategy set. 
Please use `--strategy` to specify \"\n \"the strategy class to use.\")\n\n strategy_name = config['strategy']\n strategy: IStrategy = StrategyResolver._load_strategy(\n strategy_name, config=config,\n extra_dir=config.get('strategy_path'))\n strategy.ft_load_params_from_file()\n # Set attributes\n # Check if we need to override configuration\n # (Attribute name, default, subkey)\n attributes = [(\"minimal_roi\", {\"0\": 10.0}),\n (\"timeframe\", None),\n (\"stoploss\", None),\n (\"trailing_stop\", None),\n (\"trailing_stop_positive\", None),\n (\"trailing_stop_positive_offset\", 0.0),\n (\"trailing_only_offset_is_reached\", None),\n (\"use_custom_stoploss\", None),\n (\"process_only_new_candles\", None),\n (\"order_types\", None),\n (\"order_time_in_force\", None),\n (\"stake_currency\", None),\n (\"stake_amount\", None),\n (\"protections\", None),\n (\"startup_candle_count\", None),\n (\"unfilledtimeout\", None),\n (\"use_exit_signal\", True),\n (\"exit_profit_only\", False),\n (\"ignore_roi_if_entry_signal\", False),\n (\"exit_profit_offset\", 0.0),\n (\"disable_dataframe_checks\", False),\n (\"ignore_buying_expired_candle_after\", 0),\n (\"position_adjustment_enable\", False),\n (\"max_entry_position_adjustment\", -1),\n (\"max_open_trades\", -1)\n ]\n for attribute, default in attributes:\n StrategyResolver._override_attribute_helper(strategy, config,\n attribute, default)\n\n # Loop this list again to have output combined\n for attribute, _ in attributes:\n if attribute in config:\n logger.info(\"Strategy using %s: %s\", attribute, config[attribute])\n\n StrategyResolver._normalize_attributes(strategy)\n\n StrategyResolver._strategy_sanity_validations(strategy)\n return strategy\n\n @staticmethod\n def _override_attribute_helper(strategy, config: Config, attribute: str, default: Any):\n \"\"\"\n Override attributes in the strategy.\n Prevalence:\n - Configuration\n - Strategy\n - default (if not None)\n \"\"\"\n if (attribute in config\n and not isinstance(getattr(type(strategy), attribute, None), property)):\n # Ensure Properties are not overwritten\n setattr(strategy, attribute, config[attribute])\n logger.info(\"Override strategy '%s' with value in config file: %s.\",\n attribute, config[attribute])\n elif hasattr(strategy, attribute):\n val = getattr(strategy, attribute)\n # None's cannot exist in the config, so do not copy them\n if val is not None:\n # max_open_trades set to -1 in the strategy will be copied as infinity in the config\n if attribute == 'max_open_trades' and val == -1:\n config[attribute] = float('inf')\n else:\n config[attribute] = val\n # Explicitly check for None here as other \"falsy\" values are possible\n elif default is not None:\n setattr(strategy, attribute, default)\n config[attribute] = default\n\n @staticmethod\n def _normalize_attributes(strategy: IStrategy) -> IStrategy:\n \"\"\"\n Normalize attributes to have the correct type.\n \"\"\"\n # Sort and apply type conversions\n if hasattr(strategy, 'minimal_roi'):\n strategy.minimal_roi = dict(sorted(\n {int(key): value for (key, value) in strategy.minimal_roi.items()}.items(),\n key=lambda t: t[0]))\n if hasattr(strategy, 'stoploss'):\n strategy.stoploss = float(strategy.stoploss)\n if hasattr(strategy, 'max_open_trades') and strategy.max_open_trades < 0:\n strategy.max_open_trades = float('inf')\n return strategy\n\n @staticmethod\n def _strategy_sanity_validations(strategy: IStrategy):\n # Ensure necessary migrations are performed first.\n validate_migrated_strategy_settings(strategy.config)\n\n if 
not all(k in strategy.order_types for k in REQUIRED_ORDERTYPES):\n raise ImportError(f\"Impossible to load Strategy '{strategy.__class__.__name__}'. \"\n f\"Order-types mapping is incomplete.\")\n if not all(k in strategy.order_time_in_force for k in REQUIRED_ORDERTIF):\n raise ImportError(f\"Impossible to load Strategy '{strategy.__class__.__name__}'. \"\n f\"Order-time-in-force mapping is incomplete.\")\n trading_mode = strategy.config.get('trading_mode', TradingMode.SPOT)\n\n if (strategy.can_short and trading_mode == TradingMode.SPOT):\n raise ImportError(\n \"Short strategies cannot run in spot markets. Please make sure that this \"\n \"is the correct strategy and that your trading mode configuration is correct. \"\n \"You can run this strategy in spot markets by setting `can_short=False`\"\n \" in your strategy. Please note that short signals will be ignored in that case.\"\n )\n\n @staticmethod\n def validate_strategy(strategy: IStrategy) -> IStrategy:\n if strategy.config.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT:\n # Require new method\n warn_deprecated_setting(strategy, 'sell_profit_only', 'exit_profit_only', True)\n warn_deprecated_setting(strategy, 'sell_profit_offset', 'exit_profit_offset', True)\n warn_deprecated_setting(strategy, 'use_sell_signal', 'use_exit_signal', True)\n warn_deprecated_setting(strategy, 'ignore_roi_if_buy_signal',\n 'ignore_roi_if_entry_signal', True)\n\n if not check_override(strategy, IStrategy, 'populate_entry_trend'):\n raise OperationalException(\"`populate_entry_trend` must be implemented.\")\n if not check_override(strategy, IStrategy, 'populate_exit_trend'):\n raise OperationalException(\"`populate_exit_trend` must be implemented.\")\n if check_override(strategy, IStrategy, 'check_buy_timeout'):\n raise OperationalException(\"Please migrate your implementation \"\n \"of `check_buy_timeout` to `check_entry_timeout`.\")\n if check_override(strategy, IStrategy, 'check_sell_timeout'):\n raise OperationalException(\"Please migrate your implementation \"\n \"of `check_sell_timeout` to `check_exit_timeout`.\")\n\n if check_override(strategy, IStrategy, 'custom_sell'):\n raise OperationalException(\n \"Please migrate your implementation of `custom_sell` to `custom_exit`.\")\n\n else:\n # TODO: Implementing one of the following methods should show a deprecation warning\n # buy_trend and sell_trend, custom_sell\n warn_deprecated_setting(strategy, 'sell_profit_only', 'exit_profit_only')\n warn_deprecated_setting(strategy, 'sell_profit_offset', 'exit_profit_offset')\n warn_deprecated_setting(strategy, 'use_sell_signal', 'use_exit_signal')\n warn_deprecated_setting(strategy, 'ignore_roi_if_buy_signal',\n 'ignore_roi_if_entry_signal')\n\n if (\n not check_override(strategy, IStrategy, 'populate_buy_trend')\n and not check_override(strategy, IStrategy, 'populate_entry_trend')\n ):\n raise OperationalException(\n \"`populate_entry_trend` or `populate_buy_trend` must be implemented.\")\n if (\n not check_override(strategy, IStrategy, 'populate_sell_trend')\n and not check_override(strategy, IStrategy, 'populate_exit_trend')\n ):\n raise OperationalException(\n \"`populate_exit_trend` or `populate_sell_trend` must be implemented.\")\n\n _populate_fun_len = len(getfullargspec(strategy.populate_indicators).args)\n _buy_fun_len = len(getfullargspec(strategy.populate_buy_trend).args)\n _sell_fun_len = len(getfullargspec(strategy.populate_sell_trend).args)\n if any(x == 2 for x in [\n _populate_fun_len,\n _buy_fun_len,\n _sell_fun_len\n ]):\n raise 
OperationalException(\n \"Strategy Interface v1 is no longer supported. \"\n \"Please update your strategy to implement \"\n \"`populate_indicators`, `populate_entry_trend` and `populate_exit_trend` \"\n \"with the metadata argument. \")\n\n has_after_fill = ('after_fill' in getfullargspec(strategy.custom_stoploss).args\n and check_override(strategy, IStrategy, 'custom_stoploss'))\n if has_after_fill:\n strategy._ft_stop_uses_after_fill = True\n\n return strategy\n\n @staticmethod\n def _load_strategy(strategy_name: str,\n config: Config, extra_dir: Optional[str] = None) -> IStrategy:\n \"\"\"\n Search and loads the specified strategy.\n :param strategy_name: name of the module to import\n :param config: configuration for the strategy\n :param extra_dir: additional directory to search for the given strategy\n :return: Strategy instance or None\n \"\"\"\n if config.get('recursive_strategy_search', False):\n extra_dirs: List[str] = [\n path[0] for path in walk(f\"{config['user_data_dir']}/{USERPATH_STRATEGIES}\")\n ] # sub-directories\n else:\n extra_dirs = []\n\n if extra_dir:\n extra_dirs.append(extra_dir)\n\n abs_paths = StrategyResolver.build_search_paths(config,\n user_subdir=USERPATH_STRATEGIES,\n extra_dirs=extra_dirs)\n\n if \":\" in strategy_name:\n logger.info(\"loading base64 encoded strategy\")\n strat = strategy_name.split(\":\")\n\n if len(strat) == 2:\n temp = Path(tempfile.mkdtemp(\"freq\", \"strategy\"))\n name = strat[0] + \".py\"\n\n temp.joinpath(name).write_text(urlsafe_b64decode(strat[1]).decode('utf-8'))\n temp.joinpath(\"__init__.py\").touch()\n\n strategy_name = strat[0]\n\n # register temp path with the bot\n abs_paths.insert(0, temp.resolve())\n\n strategy = StrategyResolver._load_object(\n paths=abs_paths,\n object_name=strategy_name,\n add_source=True,\n kwargs={'config': config},\n )\n\n if strategy:\n\n return StrategyResolver.validate_strategy(strategy)\n\n raise OperationalException(\n f\"Impossible to load Strategy '{strategy_name}'. 
This class does not exist \"\n \"or contains Python code errors.\"\n )" }, { "identifier": "merge_informative_pair", "path": "freqtrade/strategy/strategy_helper.py", "snippet": "def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame,\n timeframe: str, timeframe_inf: str, ffill: bool = True,\n append_timeframe: bool = True,\n date_column: str = 'date',\n suffix: Optional[str] = None) -> pd.DataFrame:\n \"\"\"\n Correctly merge informative samples to the original dataframe, avoiding lookahead bias.\n\n Since dates are candle open dates, merging a 15m candle that starts at 15:00, and a\n 1h candle that starts at 15:00 will result in all candles to know the close at 16:00\n which they should not know.\n\n Moves the date of the informative pair by 1 time interval forward.\n This way, the 14:00 1h candle is merged to 15:00 15m candle, since the 14:00 1h candle is the\n last candle that's closed at 15:00, 15:15, 15:30 or 15:45.\n\n Assuming inf_tf = '1d' - then the resulting columns will be:\n date_1d, open_1d, high_1d, low_1d, close_1d, rsi_1d\n\n :param dataframe: Original dataframe\n :param informative: Informative pair, most likely loaded via dp.get_pair_dataframe\n :param timeframe: Timeframe of the original pair sample.\n :param timeframe_inf: Timeframe of the informative pair sample.\n :param ffill: Forwardfill missing values - optional but usually required\n :param append_timeframe: Rename columns by appending timeframe.\n :param date_column: A custom date column name.\n :param suffix: A string suffix to add at the end of the informative columns. If specified,\n append_timeframe must be false.\n :return: Merged dataframe\n :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe\n \"\"\"\n\n minutes_inf = timeframe_to_minutes(timeframe_inf)\n minutes = timeframe_to_minutes(timeframe)\n if minutes == minutes_inf:\n # No need to forwardshift if the timeframes are identical\n informative['date_merge'] = informative[date_column]\n elif minutes < minutes_inf:\n # Subtract \"small\" timeframe so merging is not delayed by 1 small candle\n # Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073\n if not informative.empty:\n informative['date_merge'] = (\n informative[date_column] + pd.to_timedelta(minutes_inf, 'm') -\n pd.to_timedelta(minutes, 'm')\n )\n else:\n informative['date_merge'] = informative[date_column]\n else:\n raise ValueError(\"Tried to merge a faster timeframe to a slower timeframe.\"\n \"This would create new rows, and can throw off your regular indicators.\")\n\n # Rename columns to be unique\n date_merge = 'date_merge'\n if suffix and append_timeframe:\n raise ValueError(\"You can not specify `append_timeframe` as True and a `suffix`.\")\n elif append_timeframe:\n date_merge = f'date_merge_{timeframe_inf}'\n informative.columns = [f\"{col}_{timeframe_inf}\" for col in informative.columns]\n\n elif suffix:\n date_merge = f'date_merge_{suffix}'\n informative.columns = [f\"{col}_{suffix}\" for col in informative.columns]\n\n # Combine the 2 dataframes\n # all indicators on the informative sample MUST be calculated before this point\n if ffill:\n # https://pandas.pydata.org/docs/user_guide/merging.html#timeseries-friendly-merging\n # merge_ordered - ffill method is 2.5x faster than seperate ffill()\n dataframe = pd.merge_ordered(dataframe, informative, fill_method=\"ffill\", left_on='date',\n right_on=date_merge, how='left')\n else:\n dataframe = pd.merge(dataframe, informative, left_on='date',\n 
right_on=date_merge, how='left')\n dataframe = dataframe.drop(date_merge, axis=1)\n\n # if ffill:\n # dataframe = dataframe.ffill()\n\n return dataframe" }, { "identifier": "stoploss_from_absolute", "path": "freqtrade/strategy/strategy_helper.py", "snippet": "def stoploss_from_absolute(stop_rate: float, current_rate: float, is_short: bool = False,\n leverage: float = 1.0) -> float:\n \"\"\"\n Given current price and desired stop price, return a stop loss value that is relative to current\n price.\n\n The requested stop can be positive for a stop above the open price, or negative for\n a stop below the open price. The return value is always >= 0.\n\n Returns 0 if the resulting stop price would be above the current price.\n\n :param stop_rate: Stop loss price.\n :param current_rate: Current asset price.\n :param is_short: When true, perform the calculation for short instead of long\n :param leverage: Leverage to use for the calculation\n :return: Positive stop loss value relative to current price\n \"\"\"\n\n # formula is undefined for current_rate 0, return maximum value\n if current_rate == 0:\n return 1\n\n stoploss = 1 - (stop_rate / current_rate)\n if is_short:\n stoploss = -stoploss\n\n # negative stoploss values indicate the requested stop price is higher/lower\n # (long/short) than the current price\n # shorts can yield stoploss values higher than 1, so limit that as well\n return max(min(stoploss, 1.0), 0.0) * leverage" }, { "identifier": "stoploss_from_open", "path": "freqtrade/strategy/strategy_helper.py", "snippet": "def stoploss_from_open(\n open_relative_stop: float,\n current_profit: float,\n is_short: bool = False,\n leverage: float = 1.0\n) -> float:\n \"\"\"\n Given the current profit, and a desired stop loss value relative to the trade entry price,\n return a stop loss value that is relative to the current price, and which can be\n returned from `custom_stoploss`.\n\n The requested stop can be positive for a stop above the open price, or negative for\n a stop below the open price. 
The return value is always >= 0.\n `open_relative_stop` will be considered as adjusted for leverage if leverage is provided..\n\n Returns 0 if the resulting stop price would be above/below (longs/shorts) the current price\n\n :param open_relative_stop: Desired stop loss percentage, relative to the open price,\n adjusted for leverage\n :param current_profit: The current profit percentage\n :param is_short: When true, perform the calculation for short instead of long\n :param leverage: Leverage to use for the calculation\n :return: Stop loss value relative to current price\n \"\"\"\n\n # formula is undefined for current_profit -1 (longs) or 1 (shorts), return maximum value\n _current_profit = current_profit / leverage\n if (_current_profit == -1 and not is_short) or (is_short and _current_profit == 1):\n return 1\n\n if is_short is True:\n stoploss = -1 + ((1 - open_relative_stop / leverage) / (1 - _current_profit))\n else:\n stoploss = 1 - ((1 + open_relative_stop / leverage) / (1 + _current_profit))\n\n # negative stoploss values indicate the requested stop price is higher/lower\n # (long/short) than the current price\n return max(stoploss * leverage, 0.0)" }, { "identifier": "generate_test_data", "path": "tests/conftest.py", "snippet": "def generate_test_data(timeframe: str, size: int, start: str = '2020-07-05'):\n np.random.seed(42)\n tf_mins = timeframe_to_minutes(timeframe)\n\n base = np.random.normal(20, 2, size=size)\n\n date = pd.date_range(start, periods=size, freq=f'{tf_mins}min', tz='UTC')\n df = pd.DataFrame({\n 'date': date,\n 'open': base,\n 'high': base + np.random.normal(2, 1, size=size),\n 'low': base - np.random.normal(2, 1, size=size),\n 'close': base + np.random.normal(0, 1, size=size),\n 'volume': np.random.normal(200, size=size)\n }\n )\n df = df.dropna()\n return df" }, { "identifier": "get_patched_exchange", "path": "tests/conftest.py", "snippet": "def get_patched_exchange(mocker, config, api_mock=None, id='binance',\n mock_markets=True, mock_supported_modes=True) -> Exchange:\n patch_exchange(mocker, api_mock, id, mock_markets, mock_supported_modes)\n config['exchange']['name'] = id\n try:\n exchange = ExchangeResolver.load_exchange(config, load_leverage_tiers=True)\n except ImportError:\n exchange = Exchange(config)\n return exchange" } ]
import numpy as np import pandas as pd import pytest from freqtrade.data.dataprovider import DataProvider from freqtrade.enums import CandleType from freqtrade.resolvers.strategy_resolver import StrategyResolver from freqtrade.strategy import merge_informative_pair, stoploss_from_absolute, stoploss_from_open from tests.conftest import generate_test_data, get_patched_exchange
13539
for open_range in open_price_ranges: for open_price in np.linspace(*open_range): for desired_stop in np.linspace(-0.50, 0.50, 30): if side == 'long': # -1 is not a valid current_profit, should return 1 assert stoploss_from_open(desired_stop, -1) == 1 else: # 1 is not a valid current_profit for shorts, should return 1 assert stoploss_from_open(desired_stop, 1, True) == 1 for current_profit in np.linspace(*profitrange): if side == 'long': current_price = open_price * (1 + current_profit) expected_stop_price = open_price * (1 + desired_stop) stoploss = stoploss_from_open(desired_stop, current_profit) stop_price = current_price * (1 - stoploss) else: current_price = open_price * (1 - current_profit) expected_stop_price = open_price * (1 - desired_stop) stoploss = stoploss_from_open(desired_stop, current_profit, True) stop_price = current_price * (1 + stoploss) assert stoploss >= 0 # Technically the formula can yield values greater than 1 for shorts # eventhough it doesn't make sense because the position would be liquidated if side == 'long': assert stoploss <= 1 # there is no correct answer if the expected stop price is above # the current price if ((side == 'long' and expected_stop_price > current_price) or (side == 'short' and expected_stop_price < current_price)): assert stoploss == 0 else: assert pytest.approx(stop_price) == expected_stop_price @pytest.mark.parametrize("side,rel_stop,curr_profit,leverage,expected", [ # profit range for long is [-1, inf] while for shorts is [-inf, 1] ("long", 0, -1, 1, 1), ("long", 0, 0.1, 1, 0.09090909), ("long", -0.1, 0.1, 1, 0.18181818), ("long", 0.1, 0.2, 1, 0.08333333), ("long", 0.1, 0.5, 1, 0.266666666), ("long", 0.1, 5, 1, 0.816666666), # 500% profit, set stoploss to 10% above open price ("long", 0, 5, 10, 3.3333333), # 500% profit, set stoploss break even ("long", 0.1, 5, 10, 3.26666666), # 500% profit, set stoploss to 10% above open price ("long", -0.1, 5, 10, 3.3999999), # 500% profit, set stoploss to 10% belowopen price ("short", 0, 0.1, 1, 0.1111111), ("short", -0.1, 0.1, 1, 0.2222222), ("short", 0.1, 0.2, 1, 0.125), ("short", 0.1, 1, 1, 1), ("short", -0.01, 5, 10, 10.01999999), # 500% profit at 10x ]) def test_stoploss_from_open_leverage(side, rel_stop, curr_profit, leverage, expected): stoploss = stoploss_from_open(rel_stop, curr_profit, side == 'short', leverage) assert pytest.approx(stoploss) == expected open_rate = 100 if stoploss != 1: if side == 'long': current_rate = open_rate * (1 + curr_profit / leverage) stop = current_rate * (1 - stoploss / leverage) assert pytest.approx(stop) == open_rate * (1 + rel_stop / leverage) else: current_rate = open_rate * (1 - curr_profit / leverage) stop = current_rate * (1 + stoploss / leverage) assert pytest.approx(stop) == open_rate * (1 - rel_stop / leverage) def test_stoploss_from_absolute(): assert pytest.approx(stoploss_from_absolute(90, 100)) == 1 - (90 / 100) assert pytest.approx(stoploss_from_absolute(90, 100)) == 0.1 assert pytest.approx(stoploss_from_absolute(95, 100)) == 0.05 assert pytest.approx(stoploss_from_absolute(100, 100)) == 0 assert pytest.approx(stoploss_from_absolute(110, 100)) == 0 assert pytest.approx(stoploss_from_absolute(100, 0)) == 1 assert pytest.approx(stoploss_from_absolute(0, 100)) == 1 assert pytest.approx(stoploss_from_absolute(0, 100, False, leverage=5)) == 5 assert pytest.approx(stoploss_from_absolute(90, 100, True)) == 0 assert pytest.approx(stoploss_from_absolute(100, 100, True)) == 0 assert pytest.approx(stoploss_from_absolute(110, 100, True)) == -(1 - (110 / 100)) 
assert pytest.approx(stoploss_from_absolute(110, 100, True)) == 0.1 assert pytest.approx(stoploss_from_absolute(105, 100, True)) == 0.05 assert pytest.approx(stoploss_from_absolute(105, 100, True, 5)) == 0.05 * 5 assert pytest.approx(stoploss_from_absolute(100, 0, True)) == 1 assert pytest.approx(stoploss_from_absolute(0, 100, True)) == 0 assert pytest.approx(stoploss_from_absolute(100, 1, is_short=True)) == 1 assert pytest.approx(stoploss_from_absolute(100, 1, is_short=True, leverage=5)) == 5 @pytest.mark.parametrize('trading_mode', ['futures', 'spot']) def test_informative_decorator(mocker, default_conf_usdt, trading_mode): candle_def = CandleType.get_default(trading_mode) default_conf_usdt['candle_type_def'] = candle_def test_data_5m = generate_test_data('5m', 40) test_data_30m = generate_test_data('30m', 40) test_data_1h = generate_test_data('1h', 40) data = { ('XRP/USDT', '5m', candle_def): test_data_5m, ('XRP/USDT', '30m', candle_def): test_data_30m, ('XRP/USDT', '1h', candle_def): test_data_1h, ('LTC/USDT', '5m', candle_def): test_data_5m, ('LTC/USDT', '30m', candle_def): test_data_30m, ('LTC/USDT', '1h', candle_def): test_data_1h, ('NEO/USDT', '30m', candle_def): test_data_30m, ('NEO/USDT', '5m', CandleType.SPOT): test_data_5m, # Explicit request with '' as candletype ('NEO/USDT', '15m', candle_def): test_data_5m, # Explicit request with '' as candletype ('NEO/USDT', '1h', candle_def): test_data_1h, ('ETH/USDT', '1h', candle_def): test_data_1h, ('ETH/USDT', '30m', candle_def): test_data_30m, ('ETH/BTC', '1h', CandleType.SPOT): test_data_1h, # Explicitly selected as spot } default_conf_usdt['strategy'] = 'InformativeDecoratorTest' strategy = StrategyResolver.load_strategy(default_conf_usdt) exchange = get_patched_exchange(mocker, default_conf_usdt)
def test_merge_informative_pair(): data = generate_test_data('15m', 40) informative = generate_test_data('1h', 40) result = merge_informative_pair(data, informative, '15m', '1h', ffill=True) assert isinstance(result, pd.DataFrame) assert len(result) == len(data) assert 'date' in result.columns assert result['date'].equals(data['date']) assert 'date_1h' in result.columns assert 'open' in result.columns assert 'open_1h' in result.columns assert result['open'].equals(data['open']) assert 'close' in result.columns assert 'close_1h' in result.columns assert result['close'].equals(data['close']) assert 'volume' in result.columns assert 'volume_1h' in result.columns assert result['volume'].equals(data['volume']) # First 3 rows are empty assert result.iloc[0]['date_1h'] is pd.NaT assert result.iloc[1]['date_1h'] is pd.NaT assert result.iloc[2]['date_1h'] is pd.NaT # Next 4 rows contain the starting date (0:00) assert result.iloc[3]['date_1h'] == result.iloc[0]['date'] assert result.iloc[4]['date_1h'] == result.iloc[0]['date'] assert result.iloc[5]['date_1h'] == result.iloc[0]['date'] assert result.iloc[6]['date_1h'] == result.iloc[0]['date'] # Next 4 rows contain the next Hourly date original date row 4 assert result.iloc[7]['date_1h'] == result.iloc[4]['date'] assert result.iloc[8]['date_1h'] == result.iloc[4]['date'] informative = generate_test_data('1h', 40) result = merge_informative_pair(data, informative, '15m', '1h', ffill=False) # First 3 rows are empty assert result.iloc[0]['date_1h'] is pd.NaT assert result.iloc[1]['date_1h'] is pd.NaT assert result.iloc[2]['date_1h'] is pd.NaT # Next 4 rows contain the starting date (0:00) assert result.iloc[3]['date_1h'] == result.iloc[0]['date'] assert result.iloc[4]['date_1h'] is pd.NaT assert result.iloc[5]['date_1h'] is pd.NaT assert result.iloc[6]['date_1h'] is pd.NaT # Next 4 rows contain the next Hourly date original date row 4 assert result.iloc[7]['date_1h'] == result.iloc[4]['date'] assert result.iloc[8]['date_1h'] is pd.NaT def test_merge_informative_pair_same(): data = generate_test_data('15m', 40) informative = generate_test_data('15m', 40) result = merge_informative_pair(data, informative, '15m', '15m', ffill=True) assert isinstance(result, pd.DataFrame) assert len(result) == len(data) assert 'date' in result.columns assert result['date'].equals(data['date']) assert 'date_15m' in result.columns assert 'open' in result.columns assert 'open_15m' in result.columns assert result['open'].equals(data['open']) assert 'close' in result.columns assert 'close_15m' in result.columns assert result['close'].equals(data['close']) assert 'volume' in result.columns assert 'volume_15m' in result.columns assert result['volume'].equals(data['volume']) # Dates match 1:1 assert result['date_15m'].equals(result['date']) def test_merge_informative_pair_lower(): data = generate_test_data('1h', 40) informative = generate_test_data('15m', 40) with pytest.raises(ValueError, match=r"Tried to merge a faster timeframe .*"): merge_informative_pair(data, informative, '1h', '15m', ffill=True) def test_merge_informative_pair_empty(): data = generate_test_data('1h', 40) informative = pd.DataFrame(columns=data.columns) result = merge_informative_pair(data, informative, '1h', '2h', ffill=True) assert result['date'].equals(data['date']) assert list(result.columns) == [ 'date', 'open', 'high', 'low', 'close', 'volume', 'date_2h', 'open_2h', 'high_2h', 'low_2h', 'close_2h', 'volume_2h' ] # We merge an empty dataframe, so all values should be NaN for col in ['date_2h', 
'open_2h', 'high_2h', 'low_2h', 'close_2h', 'volume_2h']: assert result[col].isnull().all() def test_merge_informative_pair_suffix(): data = generate_test_data('15m', 20) informative = generate_test_data('1h', 20) result = merge_informative_pair(data, informative, '15m', '1h', append_timeframe=False, suffix="suf") assert 'date' in result.columns assert result['date'].equals(data['date']) assert 'date_suf' in result.columns assert 'open_suf' in result.columns assert 'open_1h' not in result.columns assert list(result.columns) == [ 'date', 'open', 'high', 'low', 'close', 'volume', 'date_suf', 'open_suf', 'high_suf', 'low_suf', 'close_suf', 'volume_suf' ] def test_merge_informative_pair_suffix_append_timeframe(): data = generate_test_data('15m', 20) informative = generate_test_data('1h', 20) with pytest.raises(ValueError, match=r"You can not specify `append_timeframe` .*"): merge_informative_pair(data, informative, '15m', '1h', suffix="suf") @pytest.mark.parametrize("side,profitrange", [ # profit range for long is [-1, inf] while for shorts is [-inf, 1] ("long", [-0.99, 2, 30]), ("short", [-2.0, 0.99, 30]), ]) def test_stoploss_from_open(side, profitrange): open_price_ranges = [ [0.01, 1.00, 30], [1, 100, 30], [100, 10000, 30], ] for open_range in open_price_ranges: for open_price in np.linspace(*open_range): for desired_stop in np.linspace(-0.50, 0.50, 30): if side == 'long': # -1 is not a valid current_profit, should return 1 assert stoploss_from_open(desired_stop, -1) == 1 else: # 1 is not a valid current_profit for shorts, should return 1 assert stoploss_from_open(desired_stop, 1, True) == 1 for current_profit in np.linspace(*profitrange): if side == 'long': current_price = open_price * (1 + current_profit) expected_stop_price = open_price * (1 + desired_stop) stoploss = stoploss_from_open(desired_stop, current_profit) stop_price = current_price * (1 - stoploss) else: current_price = open_price * (1 - current_profit) expected_stop_price = open_price * (1 - desired_stop) stoploss = stoploss_from_open(desired_stop, current_profit, True) stop_price = current_price * (1 + stoploss) assert stoploss >= 0 # Technically the formula can yield values greater than 1 for shorts # eventhough it doesn't make sense because the position would be liquidated if side == 'long': assert stoploss <= 1 # there is no correct answer if the expected stop price is above # the current price if ((side == 'long' and expected_stop_price > current_price) or (side == 'short' and expected_stop_price < current_price)): assert stoploss == 0 else: assert pytest.approx(stop_price) == expected_stop_price @pytest.mark.parametrize("side,rel_stop,curr_profit,leverage,expected", [ # profit range for long is [-1, inf] while for shorts is [-inf, 1] ("long", 0, -1, 1, 1), ("long", 0, 0.1, 1, 0.09090909), ("long", -0.1, 0.1, 1, 0.18181818), ("long", 0.1, 0.2, 1, 0.08333333), ("long", 0.1, 0.5, 1, 0.266666666), ("long", 0.1, 5, 1, 0.816666666), # 500% profit, set stoploss to 10% above open price ("long", 0, 5, 10, 3.3333333), # 500% profit, set stoploss break even ("long", 0.1, 5, 10, 3.26666666), # 500% profit, set stoploss to 10% above open price ("long", -0.1, 5, 10, 3.3999999), # 500% profit, set stoploss to 10% belowopen price ("short", 0, 0.1, 1, 0.1111111), ("short", -0.1, 0.1, 1, 0.2222222), ("short", 0.1, 0.2, 1, 0.125), ("short", 0.1, 1, 1, 1), ("short", -0.01, 5, 10, 10.01999999), # 500% profit at 10x ]) def test_stoploss_from_open_leverage(side, rel_stop, curr_profit, leverage, expected): stoploss = 
stoploss_from_open(rel_stop, curr_profit, side == 'short', leverage) assert pytest.approx(stoploss) == expected open_rate = 100 if stoploss != 1: if side == 'long': current_rate = open_rate * (1 + curr_profit / leverage) stop = current_rate * (1 - stoploss / leverage) assert pytest.approx(stop) == open_rate * (1 + rel_stop / leverage) else: current_rate = open_rate * (1 - curr_profit / leverage) stop = current_rate * (1 + stoploss / leverage) assert pytest.approx(stop) == open_rate * (1 - rel_stop / leverage) def test_stoploss_from_absolute(): assert pytest.approx(stoploss_from_absolute(90, 100)) == 1 - (90 / 100) assert pytest.approx(stoploss_from_absolute(90, 100)) == 0.1 assert pytest.approx(stoploss_from_absolute(95, 100)) == 0.05 assert pytest.approx(stoploss_from_absolute(100, 100)) == 0 assert pytest.approx(stoploss_from_absolute(110, 100)) == 0 assert pytest.approx(stoploss_from_absolute(100, 0)) == 1 assert pytest.approx(stoploss_from_absolute(0, 100)) == 1 assert pytest.approx(stoploss_from_absolute(0, 100, False, leverage=5)) == 5 assert pytest.approx(stoploss_from_absolute(90, 100, True)) == 0 assert pytest.approx(stoploss_from_absolute(100, 100, True)) == 0 assert pytest.approx(stoploss_from_absolute(110, 100, True)) == -(1 - (110 / 100)) assert pytest.approx(stoploss_from_absolute(110, 100, True)) == 0.1 assert pytest.approx(stoploss_from_absolute(105, 100, True)) == 0.05 assert pytest.approx(stoploss_from_absolute(105, 100, True, 5)) == 0.05 * 5 assert pytest.approx(stoploss_from_absolute(100, 0, True)) == 1 assert pytest.approx(stoploss_from_absolute(0, 100, True)) == 0 assert pytest.approx(stoploss_from_absolute(100, 1, is_short=True)) == 1 assert pytest.approx(stoploss_from_absolute(100, 1, is_short=True, leverage=5)) == 5 @pytest.mark.parametrize('trading_mode', ['futures', 'spot']) def test_informative_decorator(mocker, default_conf_usdt, trading_mode): candle_def = CandleType.get_default(trading_mode) default_conf_usdt['candle_type_def'] = candle_def test_data_5m = generate_test_data('5m', 40) test_data_30m = generate_test_data('30m', 40) test_data_1h = generate_test_data('1h', 40) data = { ('XRP/USDT', '5m', candle_def): test_data_5m, ('XRP/USDT', '30m', candle_def): test_data_30m, ('XRP/USDT', '1h', candle_def): test_data_1h, ('LTC/USDT', '5m', candle_def): test_data_5m, ('LTC/USDT', '30m', candle_def): test_data_30m, ('LTC/USDT', '1h', candle_def): test_data_1h, ('NEO/USDT', '30m', candle_def): test_data_30m, ('NEO/USDT', '5m', CandleType.SPOT): test_data_5m, # Explicit request with '' as candletype ('NEO/USDT', '15m', candle_def): test_data_5m, # Explicit request with '' as candletype ('NEO/USDT', '1h', candle_def): test_data_1h, ('ETH/USDT', '1h', candle_def): test_data_1h, ('ETH/USDT', '30m', candle_def): test_data_30m, ('ETH/BTC', '1h', CandleType.SPOT): test_data_1h, # Explicitly selected as spot } default_conf_usdt['strategy'] = 'InformativeDecoratorTest' strategy = StrategyResolver.load_strategy(default_conf_usdt) exchange = get_patched_exchange(mocker, default_conf_usdt)
strategy.dp = DataProvider({}, exchange, None)
0
2023-10-21 10:02:05+00:00
16k
generative-skill-chaining/gsc-code
generative_skill_chaining/envs/pybullet/table/predicates.py
[ { "identifier": "primitive_actions", "path": "generative_skill_chaining/envs/pybullet/table/primitive_actions.py", "snippet": "class PrimitiveAction:\nclass PickAction(PrimitiveAction):\nclass PlaceAction(PrimitiveAction):\nclass PullAction(PrimitiveAction):\nclass PushAction(PrimitiveAction):\n RANGES: Dict[str, Tuple[float, float]]\n RANGES = {\n \"x\": (-0.2, 0.2),\n \"y\": (-0.1, 0.1),\n \"z\": (-0.05, 0.05),\n \"theta\": (-0.25 * np.pi, 0.75 * np.pi),\n }\n RANGES = {\n \"x\": (-1.0, 1.0),\n \"y\": (-1.0, 1.0),\n \"z\": (0.0, 0.1),\n \"theta\": (-0.25 * np.pi, 0.75 * np.pi),\n }\n RANGES = {\n \"r_reach\": (-0.2, 0.0),\n \"r_pull\": (-0.4, -0.1),\n \"y\": (-0.05, 0.05),\n \"theta\": (-0.5 * np.pi, 0.5 * np.pi),\n }\n RANGES = {\n \"r_reach\": (-0.4, -0.2),\n \"r_push\": (0.1, 0.4),\n \"y\": (-0.05, 0.05),\n \"theta\": (-0.5 * np.pi, 0.5 * np.pi),\n }\n def __init__(self, vector: Optional[np.ndarray] = None):\n def range(cls) -> np.ndarray:\n def random(cls):\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n pos: Optional[np.ndarray] = None,\n theta: Optional[float] = None,\n ):\n def pos(self) -> np.ndarray:\n def pos(self, pos: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n pos: Optional[np.ndarray] = None,\n theta: Optional[float] = None,\n ):\n def pos(self) -> np.ndarray:\n def pos(self, pos: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n r_reach: Optional[float] = None,\n r_pull: Optional[float] = None,\n y: Optional[float] = None,\n theta: Optional[float] = None,\n ):\n def r_reach(self) -> np.ndarray:\n def r_reach(self, r_reach: np.ndarray) -> None:\n def r_pull(self) -> np.ndarray:\n def r_pull(self, r_pull: np.ndarray) -> None:\n def y(self) -> np.ndarray:\n def y(self, y: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n r_reach: Optional[float] = None,\n r_push: Optional[float] = None,\n y: Optional[float] = None,\n theta: Optional[float] = None,\n ):\n def r_reach(self) -> np.ndarray:\n def r_reach(self, r_reach: np.ndarray) -> None:\n def r_push(self) -> np.ndarray:\n def r_push(self, r_push: np.ndarray) -> None:\n def y(self) -> np.ndarray:\n def y(self, y: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:" }, { "identifier": "utils", "path": "generative_skill_chaining/envs/pybullet/table/utils.py", "snippet": "TABLE_CONSTRAINTS = {\n \"table_z_max\": 0.00,\n \"table_x_min\": 0.28,\n \"table_y_min\": -0.45,\n \"table_y_max\": 0.45,\n \"workspace_x_min\": 0.40,\n \"operational_x_min\": 0.50,\n \"operational_x_max\": 0.60,\n \"obstruction_x_min\": 0.575,\n \"workspace_radius\": 0.7,\n}\nEPSILONS = {\"aabb\": 0.01, \"align\": 0.99, \"twist\": 0.001, \"tipping\": 0.1}\nTWIST_HISTORY: Dict[str, Dict[Object, np.ndarray]] = collections.defaultdict(dict)\ndef compute_margins(obj: Object) -> np.ndarray:\ndef compute_object_pose(obj: Object, theta: float) -> math.Pose:\ndef is_above(obj_a: Object, obj_b: Object) -> bool:\ndef is_upright(obj: Object) -> bool:\ndef is_within_distance(\n obj_a: Object, obj_b: Object, distance: float, physics_id: int\n) -> 
bool:\ndef is_moving(obj: Object, use_history: Optional[str] = None) -> bool:\ndef is_below_table(obj: Object) -> bool:\ndef is_touching(\n body_a: body.Body,\n body_b: body.Body,\n link_id_a: Optional[int] = None,\n link_id_b: Optional[int] = None,\n) -> bool:\ndef is_intersecting(obj_a: Object, obj_b: Object) -> bool:\ndef is_under(obj_a: Object, obj_b: Object) -> bool:\ndef is_inworkspace(\n obj: Optional[Object] = None,\n obj_pos: Optional[np.ndarray] = None,\n distance: Optional[float] = None,\n) -> bool:\ndef is_beyondworkspace(\n obj: Optional[Object] = None,\n obj_pos: Optional[np.ndarray] = None,\n distance: Optional[float] = None,\n) -> bool:\ndef load_config(config: Union[str, Any]) -> Any:" }, { "identifier": "Box", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Box(Object):\n def __init__(\n self,\n physics_id: int,\n name: str,\n size: Union[List[float], np.ndarray],\n color: Union[List[float], np.ndarray],\n mass: float = 0.1,\n ):\n box = shapes.Box(size=np.array(size), mass=mass, color=np.array(color))\n body_id = shapes.create_body(box, physics_id=physics_id)\n self._shape = box\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.box_size = box.size\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n\n @property\n def size(self) -> np.ndarray:\n return self._state.box_size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return [self._shape]" }, { "identifier": "Hook", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Hook(Object):\n @staticmethod\n def compute_link_positions(\n head_length: float, handle_length: float, handle_y: float, radius: float\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n dy = (\n 0.5\n * np.sign(handle_y)\n * max(0, (abs(handle_y) - 1.0) * head_length / 2 + radius)\n )\n pos_handle = np.array([-radius / 2, handle_y * head_length / 2 - dy, 0.0])\n pos_head = np.array([(handle_length - radius) / 2, -dy, 0.0])\n pos_joint = np.array(\n [(handle_length - radius) / 2, handle_y * head_length / 2 - dy, 0.0]\n )\n\n return pos_handle, pos_head, pos_joint\n\n def __init__(\n self,\n physics_id: int,\n name: str,\n head_length: float,\n handle_length: float,\n handle_y: float,\n color: Union[List[float], np.ndarray],\n radius: float = 0.02,\n mass: float = 0.1,\n ):\n if not isinstance(color, np.ndarray):\n color = np.array(color)\n\n pos_handle, pos_head, pos_joint = Hook.compute_link_positions(\n head_length=head_length,\n handle_length=handle_length,\n handle_y=handle_y,\n radius=radius,\n )\n handle = shapes.Cylinder(\n radius=radius,\n length=handle_length,\n mass=(handle_length / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(\n pos=pos_handle,\n quat=eigen.Quaterniond(\n eigen.AngleAxisd(angle=np.pi / 2, axis=np.array([0.0, 1.0, 0.0]))\n ).coeffs,\n ),\n )\n head = shapes.Cylinder(\n radius=radius,\n length=head_length,\n mass=(head_length / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(\n pos=pos_head,\n quat=eigen.Quaterniond(\n eigen.AngleAxisd(angle=np.pi / 2, axis=np.array([1.0, 0.0, 0.0]))\n ).coeffs,\n ),\n )\n joint = shapes.Sphere(\n radius=radius,\n mass=(radius / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(pos=pos_joint),\n )\n self._shapes = [joint, handle, head]\n body_id = shapes.create_body(\n 
self.shapes, link_parents=[0, 0], physics_id=physics_id\n )\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.head_length = head_length\n self._state.handle_length = handle_length\n self._state.handle_y = handle_y\n self._radius = radius\n\n self._size = np.array(\n [handle_length + radius, head_length + 2 * abs(pos_head[1]), 2 * radius]\n )\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n\n @property\n def head_length(self) -> float:\n return self._state.head_length # type: ignore\n\n @property\n def handle_length(self) -> float:\n return self._state.handle_length # type: ignore\n\n @property\n def handle_y(self) -> float:\n return self._state.handle_y # type: ignore\n\n @property\n def radius(self) -> float:\n return self._radius\n\n @property\n def size(self) -> np.ndarray:\n return self._size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n def convex_hulls(\n self, world_frame: bool = True, project_2d: bool = False\n ) -> List[np.ndarray]:\n \"\"\"Computes the convex hulls of the handle and head links.\"\"\"\n handle_pose = self.shapes[1].pose\n head_pose = self.shapes[2].pose\n assert handle_pose is not None and head_pose is not None\n\n positions = np.array(\n [\n [0.0, handle_pose.pos[1], 0.0],\n [head_pose.pos[0], 0.0, 0.0],\n ]\n )\n sizes = np.array(\n [\n [self.size[0], 2 * self.radius, 2 * self.radius],\n [2 * self.radius, self.size[1], 2 * self.radius],\n ]\n )\n bboxes = np.array([positions - 0.5 * sizes, positions + 0.5 * sizes]).swapaxes(\n 0, 1\n )\n\n pose = self.pose() if world_frame else None\n vertices = [compute_bbox_vertices(bbox, pose, project_2d) for bbox in bboxes]\n\n return vertices\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return self._shapes\n\n # def aabb(self) -> np.ndarray:\n # raise NotImplementedError" }, { "identifier": "Null", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Null(Object):\n def __init__(self, physics_id: int, name: str):\n sphere = shapes.Sphere(radius=0.001)\n body_id = shapes.create_body(sphere, physics_id=physics_id)\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=True\n )\n\n def state(self) -> object_state.ObjectState:\n # Null object state is a zero vector.\n return self._state\n\n def enable_collisions(self) -> None:\n pass\n\n def unfreeze(self) -> bool:\n return False" }, { "identifier": "Object", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Object(body.Body):\n name: str\n is_static: bool = False\n\n def __init__(\n self, physics_id: int, body_id: int, name: str, is_static: bool = False\n ):\n super().__init__(physics_id, body_id)\n\n self.name = name\n self.is_static = is_static\n\n T_pybullet_to_obj = super().pose().to_eigen()\n self._modified_axes = not T_pybullet_to_obj.is_approx(\n eigen.Isometry3d.identity()\n )\n if self._modified_axes:\n self._T_pybullet_to_obj = T_pybullet_to_obj\n self._T_obj_to_pybullet = T_pybullet_to_obj.inverse()\n\n self._state = object_state.ObjectState()\n\n def pose(self) -> math.Pose:\n if not self._modified_axes:\n return super().pose()\n\n return math.Pose.from_eigen(super().pose().to_eigen() * self._T_obj_to_pybullet)\n\n def set_pose(self, pose: math.Pose) -> None:\n if not self._modified_axes:\n return super().set_pose(pose)\n\n return super().set_pose(\n math.Pose.from_eigen(pose.to_eigen() * self._T_pybullet_to_obj)\n )\n\n def 
disable_collisions(self) -> None:\n for link_id in range(self.dof):\n p.setCollisionFilterGroupMask(\n self.body_id, link_id, 0, 0, physicsClientId=self.physics_id\n )\n\n def enable_collisions(self) -> None:\n for link_id in range(self.dof):\n p.setCollisionFilterGroupMask(\n self.body_id, link_id, 1, 0xFF, physicsClientId=self.physics_id\n )\n\n @property\n def inertia(self) -> dyn.SpatialInertiad:\n try:\n return self._obj_inertia # type: ignore\n except AttributeError:\n pass\n\n self._obj_inertia = super().inertia\n if self._modified_axes:\n self._obj_inertia = self._obj_inertia * self._T_pybullet_to_obj\n\n T_world_to_obj = self.pose().to_eigen().inverse()\n for link_id in range(self.dof):\n link = body.Link(self.physics_id, self.body_id, link_id)\n T_link_to_obj = T_world_to_obj * link.pose().to_eigen()\n self._obj_inertia += link.inertia * T_link_to_obj\n\n return self._obj_inertia\n\n def state(self) -> object_state.ObjectState:\n pose = self.pose()\n aa = eigen.AngleAxisd(eigen.Quaterniond(pose.quat))\n self._state.pos = pose.pos\n self._state.aa = aa.angle * aa.axis\n\n return self._state\n\n def set_state(self, state: object_state.ObjectState) -> None:\n self.set_pose(state.pose())\n\n def reset(self, action_skeleton: List) -> None:\n pass\n\n @classmethod\n def create(\n cls,\n physics_id: int,\n object_type: Optional[str],\n object_kwargs: Dict[str, Any] = {},\n object_groups: Dict[str, \"ObjectGroup\"] = {},\n **kwargs,\n ) -> \"Object\":\n object_class = Null if object_type is None else globals()[object_type]\n if issubclass(object_class, Variant):\n kwargs[\"object_groups\"] = object_groups\n object_kwargs = object_kwargs.copy()\n object_kwargs.update(kwargs)\n return object_class(physics_id=physics_id, **object_kwargs)\n\n def isinstance(self, class_or_tuple: Union[type, Tuple[type, ...]]) -> bool:\n return isinstance(self, class_or_tuple)\n\n def type(self) -> Type[\"Object\"]:\n return type(self)\n\n @property\n def size(self) -> np.ndarray:\n raise NotImplementedError\n\n @property\n def bbox(self) -> np.ndarray:\n \"\"\"Returns the bounding box in the object frame.\n\n If the origin of the object is at its geometric center, this will be\n equivalent to `(-0.5 * self.size, 0.5 * self.size)`.\n\n Returns:\n An array of shape [2, 3] (min/max, x/y/z).\n \"\"\"\n raise NotImplementedError\n\n def convex_hulls(\n self, world_frame: bool = True, project_2d: bool = False\n ) -> List[np.ndarray]:\n \"\"\"Computes the object's convex hull.\n\n These hulls will be used for rough collision checking. By default,\n the vertices will be the 6 corners of the object's bounding box\n (`Object.bbox`).\n\n Args:\n world_frame: Whether to transform the vertices in world frame or\n leave them in object frame.\n project_2d: Whether to return the 2d convex hull.\n\n Returns:\n List of arrays of shape [_, 3] or [_, 2], where each array is a\n convex hull.\n \"\"\"\n pose = self.pose() if world_frame else None\n vertices = compute_bbox_vertices(self.bbox, pose, project_2d)\n\n return [vertices]\n\n def aabb(self) -> np.ndarray:\n \"\"\"Computes the axis-aligned bounding box from the object pose and size.\n\n This should be more accurate than `super().aabb()`, which gets the aabb\n from Pybullet. 
Pybullet returns an *enlarged* aabb for the object *base*\n link, while this returns the exact aabb for the entire object.\n\n Returns:\n An array of shape [2, 3] (min/max, x/y/z).\n \"\"\"\n vertices = np.concatenate(self.convex_hulls(world_frame=True), axis=0)\n xyz_min = vertices.min(axis=0)\n xyz_max = vertices.max(axis=0)\n\n return np.array([xyz_min, xyz_max])\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return []\n\n def __str__(self) -> str:\n return self.name\n\n def __hash__(self) -> int:\n return hash(str(self))\n\n def __eq__(self, other) -> bool:\n return str(self) == str(other)" }, { "identifier": "Rack", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Rack(Object):\n TOP_THICKNESS = 0.01\n LEG_THICKNESS = 0.01\n\n def __init__(\n self,\n physics_id: int,\n name: str,\n size: Union[List[float], np.ndarray],\n color: Union[List[float], np.ndarray],\n mass: float = 1.0,\n ):\n mass /= 7 # Divide mass among all 7 parts.\n top = shapes.Box(\n size=np.array([*size[:2], Rack.TOP_THICKNESS]),\n mass=mass,\n color=np.array(color),\n pose=math.Pose(\n pos=np.array([0.0, 0.0, -Rack.TOP_THICKNESS / 2]),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n xy_legs = np.array([(x, y) for x in (-1, 1) for y in (-1, 1)]) * (\n (np.array(size[:2])[None, :] - Rack.LEG_THICKNESS) / 2\n )\n legs = [\n shapes.Box(\n size=np.array(\n [\n Rack.LEG_THICKNESS,\n Rack.LEG_THICKNESS,\n size[2] - Rack.TOP_THICKNESS - Rack.LEG_THICKNESS,\n ]\n ),\n mass=mass,\n color=np.array([0.0, 0.0, 0.0, 1.0]),\n pose=math.Pose(\n pos=np.array(\n [\n *xy_leg,\n -(size[2] + Rack.TOP_THICKNESS - Rack.LEG_THICKNESS) / 2,\n ]\n ),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n for xy_leg in xy_legs\n ]\n stabilizers = [\n shapes.Box(\n size=np.array([size[0], Rack.LEG_THICKNESS, Rack.LEG_THICKNESS]),\n mass=mass,\n color=np.array([0.0, 0.0, 0.0, 1.0]),\n pose=math.Pose(\n pos=np.array([0.0, y_leg, -size[2] + Rack.LEG_THICKNESS / 2]),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n for y_leg in xy_legs[:2, 1]\n ]\n self._shapes = [top, *legs, *stabilizers]\n body_id = shapes.create_body(\n self.shapes,\n link_parents=[0] * (len(legs) + len(stabilizers)),\n physics_id=physics_id,\n )\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.box_size = np.array(size)\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n self._bbox[0, 2] = -size[2]\n self._bbox[1, 2] = 0\n\n @property\n def size(self) -> np.ndarray:\n return self._state.box_size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return self._shapes" }, { "identifier": "math", "path": "generative_skill_chaining/envs/pybullet/sim/math.py", "snippet": "PYBULLET_STEPS_PER_SEC = 240\nPYBULLET_TIMESTEP = 1 / PYBULLET_STEPS_PER_SEC\nclass Pose:\n def from_eigen(pose: eigen.Isometry3d) -> \"Pose\":\n def to_eigen(self) -> eigen.Isometry3d:\ndef comb(n: int, r: int) -> int:" }, { "identifier": "Robot", "path": "generative_skill_chaining/envs/pybullet/sim/robot.py", "snippet": "class Robot(body.Body):\n \"\"\"User-facing robot interface.\"\"\"\n\n def __init__(\n self,\n physics_id: int,\n step_simulation_fn: Callable[[], None],\n urdf: str,\n arm_class: Union[str, Type[arm.Arm]],\n arm_kwargs: Dict[str, Any],\n gripper_class: Union[str, Type[gripper.Gripper]],\n gripper_kwargs: Dict[str, Any],\n ):\n \"\"\"Loads the robot from a urdf file.\n\n Args:\n 
physics_id: Pybullet physics client id.\n step_simulation_fn: Function to step simulation.\n urdf: Path to urdf.\n arm_class: In the generative_skill_chaining.envs.pybullet namespace.\n arm_kwargs: Arm kwargs from yaml config.\n gripper_class: In the generative_skill_chaining.envs.pybullet namespace.\n gripper_kwargs: Gripper kwargs from yaml config.\n \"\"\"\n body_id = p.loadURDF(\n fileName=urdf,\n useFixedBase=True,\n flags=p.URDF_USE_INERTIA_FROM_FILE\n | p.URDF_MAINTAIN_LINK_ORDER, # | p.URDF_MERGE_FIXED_LINKS\n physicsClientId=physics_id,\n )\n super().__init__(physics_id, body_id)\n\n if isinstance(arm_class, str):\n arm_class = configs.get_class(arm_class, pybullet)\n if isinstance(gripper_class, str):\n gripper_class = configs.get_class(gripper_class, pybullet)\n\n self._arm = arm_class(self.physics_id, self.body_id, **arm_kwargs)\n T_world_to_ee = dyn.cartesian_pose(self.arm.ab).inverse()\n self._gripper = gripper_class(\n self.physics_id, self.body_id, T_world_to_ee, **gripper_kwargs\n )\n\n self.step_simulation = step_simulation_fn\n\n @property\n def arm(self) -> arm.Arm:\n \"\"\"Controllable arm.\"\"\"\n return self._arm\n\n @property\n def gripper(self) -> gripper.Gripper:\n \"\"\"Controllable gripper.\"\"\"\n return self._gripper\n\n @property\n def home_pose(self) -> math.Pose:\n return self.arm.home_pose\n\n def reset(self) -> bool:\n \"\"\"Resets the robot by setting the arm to its home configuration and the gripper to the open position.\n\n This method disables torque control and bypasses simulation.\n \"\"\"\n self.gripper.reset()\n self.clear_load()\n status = self.arm.reset()\n if isinstance(self.arm, real.arm.Arm):\n status = self.goto_configuration(self.arm.q_home)\n return status\n\n def clear_load(self) -> None:\n \"\"\"Resets the end-effector load to the gripper inertia.\"\"\"\n if self.gripper.inertia is not None:\n self.arm.ab.replace_load(self.gripper.inertia)\n else:\n self.arm.ab.clear_load()\n\n def set_load(self, inertia: dyn.SpatialInertiad) -> None:\n \"\"\"Sets the end-effector load to the sum of the given inertia and gripper inertia.\"\"\"\n if self.gripper.inertia is not None:\n inertia = inertia + self.gripper.inertia\n self.arm.ab.replace_load(inertia)\n\n def get_state(self) -> Dict[str, Any]:\n return {\n \"arm\": self.arm.get_state(),\n \"gripper\": self.gripper.get_state(),\n \"load\": copy.deepcopy(self.arm.ab.inertia_load),\n }\n\n def set_state(self, state: Dict[str, Any]) -> None:\n self.arm.set_state(state[\"arm\"])\n self.gripper.set_state(state[\"gripper\"])\n idx_link, load_inertia = next(iter(state[\"load\"].items()))\n self.arm.ab.replace_load(load_inertia, idx_link)\n\n def goto_home(self) -> bool:\n \"\"\"Uses opspace control to go to the home position.\"\"\"\n return self.goto_pose(\n self.home_pose.pos,\n self.home_pose.quat,\n pos_gains=(64, 16),\n ori_gains=(64, 16),\n )\n\n def _is_colliding(\n self, body_id_a: int, body_id_b: int, link_id_a: Optional[int] = None\n ) -> bool:\n kwargs = {}\n if link_id_a is not None:\n kwargs[\"linkIndexA\"] = link_id_a\n contacts = p.getContactPoints(\n bodyA=body_id_a, bodyB=body_id_b, physicsClientId=self.physics_id, **kwargs\n )\n\n if not contacts:\n return False\n\n force = contacts[0][9]\n return force > 0.0\n\n def goto_pose(\n self,\n pos: Optional[np.ndarray] = None,\n quat: Optional[Union[eigen.Quaterniond, np.ndarray]] = None,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n ori_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: 
Optional[float] = None,\n check_collisions: Sequence[int] = [],\n check_collision_freq: int = 10,\n ) -> bool:\n \"\"\"Uses opspace control to go to the desired pose.\n\n This method blocks until the command finishes or times out. A\n ControlException will be raised if the grasp controller is aborted.\n\n Args:\n pos: Optional position. Maintains current position if None.\n quat: Optional quaternion. Maintains current orientation if None.\n pos_gains: (kp, kv) gains or [3 x 2] array of xyz gains.\n ori_gains: (kp, kv) gains or [3 x 2] array of xyz gains.\n timeout: Uses the timeout specified in the yaml arm config if None.\n check_collisions: Raise an exception if the gripper or grasped\n object collides with any of the body_ids in this list.\n check_collision_freq: Iteration interval with which to check\n collisions.\n Returns:\n True if the grasp controller converges to the desired position or\n zero velocity, false if the command times out.\n \"\"\"\n if check_collisions:\n body_ids_a = [self.body_id] * len(self.gripper.finger_links)\n link_ids_a: List[Optional[int]] = list(self.gripper.finger_links)\n grasp_body_id = self.gripper._gripper_state.grasp_body_id\n if grasp_body_id is not None:\n body_ids_a.append(grasp_body_id)\n link_ids_a.append(None)\n\n # Set the pose goal.\n self.arm.set_pose_goal(pos, quat, pos_gains, ori_gains, timeout)\n\n # Simulate until the pose goal is reached.\n status = self.arm.update_torques()\n self.gripper.update_torques()\n iter = 0\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.step_simulation()\n status = self.arm.update_torques()\n self.gripper.update_torques()\n iter += 1\n\n if isinstance(self.arm, real.arm.Arm):\n continue\n\n if not check_collisions or iter % check_collision_freq != 0:\n continue\n\n # Terminate early if there are collisions with the gripper fingers\n # or grasped object.\n for body_id_a, link_id_a in zip(body_ids_a, link_ids_a):\n for body_id_b in check_collisions:\n if self._is_colliding(body_id_a, body_id_b, link_id_a):\n raise ControlException(\n f\"Robot.goto_pose({pos}, {quat}): Collision {body_id_a}:{link_id_a}, {body_id_b}\"\n )\n # print(\"Robot.goto_pose:\", pos, quat, status)\n\n if status == articulated_body.ControlStatus.ABORTED:\n raise ControlException(f\"Robot.goto_pose({pos}, {quat}): Singularity\")\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def goto_configuration(self, q: np.ndarray) -> bool:\n \"\"\"Sets the robot to the desired joint configuration.\n\n Args:\n q: Joint configuration.\n Returns:\n True if the controller converges to the desired position or zero\n velocity, false if the command times out.\n \"\"\"\n # Set the configuration goal.\n self.arm.set_configuration_goal(q)\n\n # Simulate until the pose goal is reached.\n status = self.arm.update_torques()\n self.gripper.update_torques()\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.step_simulation()\n status = self.arm.update_torques()\n self.gripper.update_torques()\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def grasp(\n self,\n command: float,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: Optional[float] = None,\n ) -> bool:\n \"\"\"Sets the gripper to the desired grasp (0.0 open, 1.0 closed).\n\n This method blocks until the command finishes or times out. 
A\n ControlException will be raised if the grasp controller is aborted.\n\n Any existing grasp constraints will be cleared and no new ones will be\n created. Use `Robot.grasp_object()` to create a grasp constraint.\n\n Args:\n command: Desired grasp (range from 0.0 open to 1.0 closed).\n pos_gains: kp gains (only used for sim).\n timeout: Uses the timeout specified in the yaml gripper config if None.\n Returns:\n True if the grasp controller converges to the desired position or\n zero velocity, false if the command times out.\n \"\"\"\n # Clear any existing grasp constraints.\n self.gripper.remove_grasp_constraint()\n self.clear_load()\n\n # Set the new grasp command.\n self.gripper.set_grasp(command, pos_gains, timeout)\n\n # Simulate until the grasp command finishes.\n status = self.gripper.update_torques()\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.arm.update_torques()\n self.step_simulation()\n status = self.gripper.update_torques()\n # print(\"Robot.grasp:\", command, status)\n\n if status == articulated_body.ControlStatus.ABORTED:\n raise ControlException(f\"Robot.grasp({command})\")\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def grasp_object(\n self,\n obj: body.Body,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: Optional[float] = None,\n realistic: bool = True,\n ) -> bool:\n \"\"\"Attempts to grasp an object and attaches the object to the gripper via a pose constraint.\n\n This method blocks until the command finishes or times out. A\n ControlException will be raised if the grasp controller is aborted.\n\n Args:\n command: Desired grasp (range from 0.0 open to 1.0 closed).\n pos_gains: kp gains (only used for sim).\n timeout: Uses the timeout specified in the yaml gripper config if None.\n realistic: If false, creates a pose constraint regardless of whether\n the object is in a secure grasp.\n Returns:\n True if the object is successfully grasped, false otherwise.\n \"\"\"\n if realistic:\n self.grasp(1, pos_gains, timeout)\n\n # Wait for grasped object to settle.\n status = self.gripper.update_torques()\n while (\n status\n in (\n articulated_body.ControlStatus.VEL_CONVERGED,\n articulated_body.ControlStatus.IN_PROGRESS,\n )\n and self.gripper._gripper_state.iter_timeout >= 0\n and (obj.twist() > 0.001).any()\n ):\n self.arm.update_torques()\n status = self.gripper.update_torques()\n self.step_simulation()\n\n # Make sure fingers aren't fully closed.\n if status == articulated_body.ControlStatus.POS_CONVERGED:\n return False\n\n # Lock the object in place with a grasp constraint.\n if not self.gripper.create_grasp_constraint(obj.body_id, realistic):\n return False\n\n # Add object load.\n T_obj_to_world = obj.pose().to_eigen()\n T_ee_to_world = dyn.cartesian_pose(self.arm.ab)\n T_obj_to_ee = T_ee_to_world.inverse() * T_obj_to_world\n self.set_load(obj.inertia * T_obj_to_ee)\n\n return True" } ]
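The Robot snippet in the context above drives the arm by repeatedly calling update_torques() and stepping the simulation until the controller converges, times out, or aborts. Below is a minimal sketch of that loop pattern, assuming a stand-in status enum in place of articulated_body.ControlStatus; all names are illustrative, not from the source.

from enum import Enum, auto
from typing import Callable


class Status(Enum):  # stand-in for articulated_body.ControlStatus
    IN_PROGRESS = auto()
    POS_CONVERGED = auto()
    VEL_CONVERGED = auto()
    ABORTED = auto()


def run_until_done(
    update_torques: Callable[[], Status],
    step_simulation: Callable[[], None],
    max_iters: int = 1000,
) -> bool:
    """Run a torque controller until it converges, aborts, or hits max_iters."""
    status = update_torques()
    iters = 0
    while status is Status.IN_PROGRESS and iters < max_iters:
        step_simulation()          # advance physics by one step
        status = update_torques()  # recompute torques / check convergence
        iters += 1
    if status is Status.ABORTED:
        raise RuntimeError("controller aborted (e.g. singularity)")
    return status in (Status.POS_CONVERGED, Status.VEL_CONVERGED)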
import dataclasses import random import numpy as np import pybullet as p import symbolic from typing import Optional, Dict, List, Sequence, Tuple, Type from ctrlutils import eigen from shapely.geometry import Polygon, LineString from generative_skill_chaining.envs.pybullet.table import primitive_actions, utils from generative_skill_chaining.envs.pybullet.table.objects import Box, Hook, Null, Object, Rack from generative_skill_chaining.envs.pybullet.sim import math from generative_skill_chaining.envs.pybullet.sim.robot import Robot
11521
def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: """Samples a geometric grounding of the InHand(a) predicate.""" obj = self.get_arg_objects(objects)[0] if obj.is_static: return True # Generate grasp pose. for i in range(Inhand.MAX_GRASP_ATTEMPTS): grasp_pose = self.generate_grasp_pose( obj, handlegrasp=f"handlegrasp({obj})" in state, upperhandlegrasp=f"upperhandlegrasp({obj})" in state, ) obj_pose = math.Pose.from_eigen(grasp_pose.to_eigen().inverse()) obj_pose.pos += robot.home_pose.pos # Use fake grasp. obj.disable_collisions() obj.set_pose(obj_pose) robot.grasp_object(obj, realistic=False) obj.enable_collisions() # Make sure object isn't touching gripper. obj.unfreeze() p.stepSimulation(physicsClientId=robot.physics_id) if not utils.is_touching(obj, robot): break elif i + 1 == Inhand.MAX_GRASP_ATTEMPTS: dbprint(f"{self}.sample():", False, "- exceeded max grasp attempts") return False dbprint(f"{self}.sample():", True) return True @staticmethod def generate_grasp_pose( obj: Object, handlegrasp: bool = False, upperhandlegrasp: bool = False ) -> math.Pose: """Generates a grasp pose in the object frame of reference.""" # Maximum deviation of the object from the gripper's center y. MAX_GRASP_Y_OFFSET = 0.01 # Gap required between control point and object bottom. FINGER_COLLISION_MARGIN = 0.02 FINGER_WIDTH = 0.022 FINGER_HEIGHT = 0.04 FINGER_DISTANCE = 0.08 THETA_STDDEV = 0.05 if obj.isinstance(Hook): hook: Hook = obj # type: ignore pos_handle, pos_head, pos_joint = Hook.compute_link_positions( head_length=hook.head_length, handle_length=hook.handle_length, handle_y=hook.handle_y, radius=hook.radius, ) if ( handlegrasp or upperhandlegrasp or np.random.random() < hook.handle_length / (hook.handle_length + hook.head_length) ): # Handle. min_xyz, max_xyz = np.array(obj.bbox) if upperhandlegrasp: min_xyz[0] = 0.0 min_xyz[1] = pos_handle[1] - MAX_GRASP_Y_OFFSET min_xyz[2] += FINGER_COLLISION_MARGIN max_xyz[0] = pos_head[0] - hook.radius - 0.5 * FINGER_WIDTH if handlegrasp: max_xyz[0] = 0.0 max_xyz[1] = pos_handle[1] + MAX_GRASP_Y_OFFSET theta = 0.0 else: # Head. min_xyz, max_xyz = np.array(obj.bbox) min_xyz[0] = pos_head[0] - MAX_GRASP_Y_OFFSET if hook.handle_y < 0: min_xyz[1] = pos_handle[1] + hook.radius + 0.5 * FINGER_WIDTH min_xyz[2] += FINGER_COLLISION_MARGIN max_xyz[0] = pos_head[0] + MAX_GRASP_Y_OFFSET if hook.handle_y > 0: max_xyz[1] = pos_handle[1] - hook.radius - 0.5 * FINGER_WIDTH theta = np.pi / 2 else: # Fit object between gripper fingers. theta = np.random.choice([0.0, np.pi / 2]) min_xyz, max_xyz = np.array(obj.bbox) if theta == 0.0: y_center = 0.5 * (min_xyz[1] + max_xyz[1]) min_xyz[1] = max( min_xyz[1] + 0.5 * FINGER_DISTANCE, y_center - MAX_GRASP_Y_OFFSET ) max_xyz[1] = min( max_xyz[1] - 0.5 * FINGER_DISTANCE, y_center + MAX_GRASP_Y_OFFSET ) elif theta == np.pi / 2: x_center = 0.5 * (min_xyz[0] + max_xyz[0]) min_xyz[0] = max( min_xyz[0] + 0.5 * FINGER_DISTANCE, x_center - MAX_GRASP_Y_OFFSET ) max_xyz[0] = min( max_xyz[0] - 0.5 * FINGER_DISTANCE, x_center + MAX_GRASP_Y_OFFSET ) min_xyz[2] += FINGER_COLLISION_MARGIN min_xyz[2] = max(min_xyz[2], max_xyz[0] - FINGER_HEIGHT) xyz = np.random.uniform(min_xyz, max_xyz) theta += np.random.normal(scale=THETA_STDDEV)
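The cropped snippet above samples a grasp pose expressed in the object frame and then inverts it (math.Pose.from_eigen(grasp_pose.to_eigen().inverse())) to obtain the object pose relative to the gripper before the fake grasp is applied. A minimal sketch of that inversion with plain homogeneous transforms, numpy only; the variable names are illustrative:

import numpy as np

# Hypothetical grasp pose expressed in the object frame (translation only).
T_grasp_in_obj = np.eye(4)
T_grasp_in_obj[:3, 3] = [0.02, 0.0, 0.05]  # a sampled xyz grasp point

# Inverting yields the object's pose in the grasp/gripper frame, which is what
# Pose.from_eigen(grasp_pose.to_eigen().inverse()) computes in the snippet.
T_obj_in_grasp = np.linalg.inv(T_grasp_in_obj)
print(T_obj_in_grasp[:3, 3])  # approximately [-0.02, 0.0, -0.05]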
dbprint = lambda *args: None # noqa # dbprint = print @dataclasses.dataclass class Predicate: args: List[str] @classmethod def create(cls, proposition: str) -> "Predicate": predicate, args = symbolic.parse_proposition(proposition) predicate_classes = { name.lower(): predicate_class for name, predicate_class in globals().items() } predicate_class = predicate_classes[predicate] return predicate_class(args) def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"] ) -> bool: """Generates a geometric grounding of a predicate.""" return True def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"] ) -> bool: """Evaluates to True if the geometrically grounded predicate is satisfied.""" return True def get_arg_objects(self, objects: Dict[str, Object]) -> List[Object]: return [objects[arg] for arg in self.args] def __str__(self) -> str: return f"{type(self).__name__.lower()}({', '.join(self.args)})" def __hash__(self) -> int: return hash(str(self)) def __eq__(self, other) -> bool: return str(self) == str(other) class HandleGrasp(Predicate): """Unary predicate enforcing a handle grasp towards the tail end on a hook object.""" pass class UpperHandleGrasp(Predicate): """Unary predicate enforcing a handle grasp towards the head on a hook object.""" pass class Free(Predicate): """Unary predicate enforcing that no top-down occlusions exist on the object.""" DISTANCE_MIN: Dict[Tuple[Type[Object], Type[Object]], float] = { (Box, Box): 0.05, (Box, Hook): 0.05, (Box, Rack): 0.1, (Hook, Rack): 0.1, } def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: child_obj = self.get_arg_objects(objects)[0] if child_obj.isinstance(Null): return True for obj in objects.values(): if f"inhand({obj})" in state or obj.isinstance(Null) or obj == child_obj: continue if utils.is_under(child_obj, obj): dbprint(f"{self}.value():", False, f"{child_obj} under {obj}") return False obj_a, obj_b = sorted( (child_obj.type(), obj.type()), key=lambda x: x.__name__ ) try: min_distance = Free.DISTANCE_MIN[(obj_a, obj_b)] except KeyError: continue if ( (obj.isinstance(Rack) and f"beyondworkspace({obj})" in state) or f"infront({child_obj}, rack)" in state or f"infront({obj}, rack)" in state ): min_distance = 0.04 if utils.is_within_distance( child_obj, obj, min_distance, obj.physics_id ) and not utils.is_above(child_obj, obj): dbprint( f"{self}.value():", False, f"{child_obj} and {obj} are within min distance", ) return False return True class Tippable(Predicate): """Unary predicate admitting non-upright configurations of an object.""" pass class TableBounds: """Predicate that specifies minimum and maximum x-y bounds on the table.""" MARGIN_SCALE: Dict[Type[Object], float] = {Hook: 0.25} def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds on the table as well as the modified margins.""" assert parent_obj.name == "table" zone = type(self).__name__.lower() poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: pos_bounds = poslimit.bounds(child_obj) zone = random.choice(list(pos_bounds.keys())) # Compute poslimit zone-specific angle if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) return pos_bounds[zone], margin elif 
f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["table_x_min"] xy_min += margin xy_max -= margin return bounds, margin @staticmethod def get_poslimit( obj: Object, state: Sequence[Predicate], ) -> Optional["PosLimit"]: try: idx_prop = state.index(f"poslimit({obj})") except ValueError: return None prop = state[idx_prop] assert isinstance(prop, PosLimit) return prop @classmethod def get_zone( cls, obj: Object, state: Sequence[Predicate], ) -> Optional["TableBounds"]: zones = [ prop for prop in state if isinstance(prop, TableBounds) and prop.args[0] == obj ] if not zones and f"on({obj}, table)" in state: return cls() elif len(zones) == 1: return zones[0] elif len(zones) != 1: raise ValueError(f"{obj} cannot be in multiple zones: {zones}") return None @staticmethod def scale_margin(obj: Object, margins: np.ndarray) -> np.ndarray: try: bounds = TableBounds.MARGIN_SCALE[obj.type()] except KeyError: return margins return bounds * margins class Aligned(Predicate): """Unary predicate enforcing that the object and world coordinate frames align.""" ANGLE_EPS: float = 0.002 ANGLE_STD: float = 0.05 ANGLE_ABS: float = 0.1 ZONE_ANGLES: Dict[Tuple[Type[Object], Optional[str]], float] = { (Rack, "inworkspace"): 0.5 * np.pi, (Rack, "beyondworkspace"): 0.0, } # def value( # self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] # ) -> bool: # obj = self.get_arg_objects(objects)[0] # if obj.isinstance(Null): # return True # try: # zone = TableBounds.get_zone(obj=obj, state=state) # angle_mean = Aligned.ZONE_ANGLES[(obj.type(), type(zone).__name__.lower())] # if ( # angle_mean - Aligned.ANGLE_ABS < -np.pi # or angle_mean + Aligned.ANGLE_ABS > np.pi # ): # raise ValueError("Cannot recover wrapped angle.") # except KeyError: # angle_mean = 0.0 # angle = eigen.AngleAxisd(eigen.Quaterniond(obj.pose().quat)).angle - angle_mean # if not ( # Aligned.ANGLE_EPS <= abs(angle) <= Aligned.ANGLE_ABS # and utils.is_upright(obj) # ): # dbprint(f"{self}.value():", False) # return False # return True @staticmethod def sample_angle(obj: Object, zone: Optional[str] = None) -> float: angle = 0.0 while abs(angle) < Aligned.ANGLE_EPS: angle = np.random.randn() * Aligned.ANGLE_STD try: angle_mu = Aligned.ZONE_ANGLES[(obj.type(), zone)] except KeyError: angle_mu = 0.0 angle = np.clip( angle + angle_mu, angle_mu - Aligned.ANGLE_ABS, angle_mu + Aligned.ANGLE_ABS, ) angle = (angle + np.pi) % (2 * np.pi) - np.pi return angle class PosLimit(Predicate): """Unary predicate limiting the placement positions of particular object types.""" POS_EPS: Dict[Type[Object], float] = {Rack: 0.01} POS_SPEC: Dict[Type[Object], Dict[str, np.ndarray]] = { Rack: { "inworkspace": np.array([0.44, -0.33]), "beyondworkspace": np.array([0.82, 0.00]), } } def bounds(self, child_obj: Object) -> Dict[str, np.ndarray]: assert child_obj.name == self.args[0] if child_obj.type() not in PosLimit.POS_SPEC: raise ValueError(f"Positions not specified for {child_obj.type()}") eps = PosLimit.POS_EPS[child_obj.type()] xys = PosLimit.POS_SPEC[child_obj.type()] bounds = {k: np.array([xy - eps, xy + eps]) for k, xy in xys.items()} return bounds class InWorkspace(Predicate, TableBounds): """Unary predicate ensuring than an object is in the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: 
Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance((Null, Rack)): # Rack is in workspace by construction. return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not utils.is_inworkspace(obj_pos=obj_pos, distance=distance): dbprint( f"{self}.value():", False, "- pos:", obj_pos[:2], "distance:", distance ) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds inside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" zone = type(self).__name__.lower() if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: return poslimit.bounds(child_obj)[zone], margin bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["workspace_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min += margin xy_max -= margin return bounds, margin class InCollisionZone(Predicate, TableBounds): """Unary predicate ensuring the object is in the collision zone.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not ( utils.TABLE_CONSTRAINTS["workspace_x_min"] <= obj.pose().pos[0] < utils.TABLE_CONSTRAINTS["operational_x_min"] and distance < utils.TABLE_CONSTRAINTS["workspace_radius"] ): dbprint(f"{self}.value():", False, "- pos:", obj_pos, "distance:", distance) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == "table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["workspace_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["operational_x_min"] xy_min += margin xy_max -= margin return bounds, margin class InOperationalZone(Predicate, TableBounds): """Unary predicate ensuring the object is in the operational zone.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not ( utils.TABLE_CONSTRAINTS["operational_x_min"] <= obj_pos[0] < utils.TABLE_CONSTRAINTS["operational_x_max"] and distance < utils.TABLE_CONSTRAINTS["workspace_radius"] ): dbprint(f"{self}.value():", False, "- pos:", obj_pos, "distance:", distance) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == "table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["operational_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["operational_x_max"] xy_min += margin xy_max -= margin 
return bounds, margin class InObstructionZone(Predicate, TableBounds): """Unary predicate ensuring the object is in the obstruction zone.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not ( obj_pos[0] >= utils.TABLE_CONSTRAINTS["obstruction_x_min"] and distance < utils.TABLE_CONSTRAINTS["workspace_radius"] ): dbprint(f"{self}.value():", False, "- pos:", obj_pos, "distance:", distance) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == "table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["obstruction_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min += margin xy_max -= margin return bounds, margin class BeyondWorkspace(Predicate, TableBounds): """Unary predicate ensuring than an object is in beyond the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True distance = float(np.linalg.norm(obj.pose().pos[:2])) if not utils.is_beyondworkspace(obj=obj, distance=distance): return False dbprint(f"{self}.value():", False, "- distance:", distance) return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds outside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" zone = type(self).__name__.lower() if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: return poslimit.bounds(child_obj)[zone], margin bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds r = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min[0] = r * np.cos(np.arcsin(0.5 * (xy_max[1] - xy_min[1]) / r)) xy_min += margin xy_max -= margin return bounds, margin class InOodZone(Predicate, TableBounds): """Unary predicate ensuring than an object is in beyond the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds outside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = bounds[0, 0] xy_max[0] = utils.TABLE_CONSTRAINTS["table_x_min"] xy_min += margin xy_max -= margin return bounds, margin class Inhand(Predicate): MAX_GRASP_ATTEMPTS = 1 def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: """Samples a geometric grounding of the InHand(a) predicate.""" obj = self.get_arg_objects(objects)[0] if 
obj.is_static: return True # Generate grasp pose. for i in range(Inhand.MAX_GRASP_ATTEMPTS): grasp_pose = self.generate_grasp_pose( obj, handlegrasp=f"handlegrasp({obj})" in state, upperhandlegrasp=f"upperhandlegrasp({obj})" in state, ) obj_pose = math.Pose.from_eigen(grasp_pose.to_eigen().inverse()) obj_pose.pos += robot.home_pose.pos # Use fake grasp. obj.disable_collisions() obj.set_pose(obj_pose) robot.grasp_object(obj, realistic=False) obj.enable_collisions() # Make sure object isn't touching gripper. obj.unfreeze() p.stepSimulation(physicsClientId=robot.physics_id) if not utils.is_touching(obj, robot): break elif i + 1 == Inhand.MAX_GRASP_ATTEMPTS: dbprint(f"{self}.sample():", False, "- exceeded max grasp attempts") return False dbprint(f"{self}.sample():", True) return True @staticmethod def generate_grasp_pose( obj: Object, handlegrasp: bool = False, upperhandlegrasp: bool = False ) -> math.Pose: """Generates a grasp pose in the object frame of reference.""" # Maximum deviation of the object from the gripper's center y. MAX_GRASP_Y_OFFSET = 0.01 # Gap required between control point and object bottom. FINGER_COLLISION_MARGIN = 0.02 FINGER_WIDTH = 0.022 FINGER_HEIGHT = 0.04 FINGER_DISTANCE = 0.08 THETA_STDDEV = 0.05 if obj.isinstance(Hook): hook: Hook = obj # type: ignore pos_handle, pos_head, pos_joint = Hook.compute_link_positions( head_length=hook.head_length, handle_length=hook.handle_length, handle_y=hook.handle_y, radius=hook.radius, ) if ( handlegrasp or upperhandlegrasp or np.random.random() < hook.handle_length / (hook.handle_length + hook.head_length) ): # Handle. min_xyz, max_xyz = np.array(obj.bbox) if upperhandlegrasp: min_xyz[0] = 0.0 min_xyz[1] = pos_handle[1] - MAX_GRASP_Y_OFFSET min_xyz[2] += FINGER_COLLISION_MARGIN max_xyz[0] = pos_head[0] - hook.radius - 0.5 * FINGER_WIDTH if handlegrasp: max_xyz[0] = 0.0 max_xyz[1] = pos_handle[1] + MAX_GRASP_Y_OFFSET theta = 0.0 else: # Head. min_xyz, max_xyz = np.array(obj.bbox) min_xyz[0] = pos_head[0] - MAX_GRASP_Y_OFFSET if hook.handle_y < 0: min_xyz[1] = pos_handle[1] + hook.radius + 0.5 * FINGER_WIDTH min_xyz[2] += FINGER_COLLISION_MARGIN max_xyz[0] = pos_head[0] + MAX_GRASP_Y_OFFSET if hook.handle_y > 0: max_xyz[1] = pos_handle[1] - hook.radius - 0.5 * FINGER_WIDTH theta = np.pi / 2 else: # Fit object between gripper fingers. theta = np.random.choice([0.0, np.pi / 2]) min_xyz, max_xyz = np.array(obj.bbox) if theta == 0.0: y_center = 0.5 * (min_xyz[1] + max_xyz[1]) min_xyz[1] = max( min_xyz[1] + 0.5 * FINGER_DISTANCE, y_center - MAX_GRASP_Y_OFFSET ) max_xyz[1] = min( max_xyz[1] - 0.5 * FINGER_DISTANCE, y_center + MAX_GRASP_Y_OFFSET ) elif theta == np.pi / 2: x_center = 0.5 * (min_xyz[0] + max_xyz[0]) min_xyz[0] = max( min_xyz[0] + 0.5 * FINGER_DISTANCE, x_center - MAX_GRASP_Y_OFFSET ) max_xyz[0] = min( max_xyz[0] - 0.5 * FINGER_DISTANCE, x_center + MAX_GRASP_Y_OFFSET ) min_xyz[2] += FINGER_COLLISION_MARGIN min_xyz[2] = max(min_xyz[2], max_xyz[0] - FINGER_HEIGHT) xyz = np.random.uniform(min_xyz, max_xyz) theta += np.random.normal(scale=THETA_STDDEV)
theta = np.clip(theta, *primitive_actions.PickAction.RANGES["theta"])
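The all_code above builds Predicate objects from proposition strings such as handlegrasp(hook): the predicate name is parsed out and looked up among the defined classes. A self-contained sketch of that dispatch pattern, with a toy parser standing in for symbolic.parse_proposition (the class names below are illustrative):

import dataclasses
from typing import List


@dataclasses.dataclass
class ToyPredicate:
    args: List[str]

    @classmethod
    def create(cls, proposition: str) -> "ToyPredicate":
        # "on(box, table)" -> name "on", args ["box", "table"]
        name, rest = proposition.split("(", 1)
        args = [a.strip() for a in rest.rstrip(")").split(",") if a.strip()]
        registry = {sub.__name__.lower(): sub for sub in cls.__subclasses__()}
        return registry[name.lower()](args)


class On(ToyPredicate):
    pass


class Inhand(ToyPredicate):
    pass


print(ToyPredicate.create("on(box, table)"))  # On(args=['box', 'table'])
print(ToyPredicate.create("inhand(hook)"))    # Inhand(args=['hook'])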
0
2023-10-16 00:22:40+00:00
16k
akashgreninja/GreSec
backend/venv/lib/python3.10/site-packages/charset_normalizer/cd.py
[ { "identifier": "FREQUENCIES", "path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/constant.py", "snippet": "FREQUENCIES: Dict[str, List[str]] = {\n \"English\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"u\",\n \"m\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"y\",\n \"b\",\n \"v\",\n \"k\",\n \"x\",\n \"j\",\n \"z\",\n \"q\",\n ],\n \"English—\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"m\",\n \"u\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"b\",\n \"y\",\n \"v\",\n \"k\",\n \"j\",\n \"x\",\n \"z\",\n \"q\",\n ],\n \"German\": [\n \"e\",\n \"n\",\n \"i\",\n \"r\",\n \"s\",\n \"t\",\n \"a\",\n \"d\",\n \"h\",\n \"u\",\n \"l\",\n \"g\",\n \"o\",\n \"c\",\n \"m\",\n \"b\",\n \"f\",\n \"k\",\n \"w\",\n \"z\",\n \"p\",\n \"v\",\n \"ü\",\n \"ä\",\n \"ö\",\n \"j\",\n ],\n \"French\": [\n \"e\",\n \"a\",\n \"s\",\n \"n\",\n \"i\",\n \"t\",\n \"r\",\n \"l\",\n \"u\",\n \"o\",\n \"d\",\n \"c\",\n \"p\",\n \"m\",\n \"é\",\n \"v\",\n \"g\",\n \"f\",\n \"b\",\n \"h\",\n \"q\",\n \"à\",\n \"x\",\n \"è\",\n \"y\",\n \"j\",\n ],\n \"Dutch\": [\n \"e\",\n \"n\",\n \"a\",\n \"i\",\n \"r\",\n \"t\",\n \"o\",\n \"d\",\n \"s\",\n \"l\",\n \"g\",\n \"h\",\n \"v\",\n \"m\",\n \"u\",\n \"k\",\n \"c\",\n \"p\",\n \"b\",\n \"w\",\n \"j\",\n \"z\",\n \"f\",\n \"y\",\n \"x\",\n \"ë\",\n ],\n \"Italian\": [\n \"e\",\n \"i\",\n \"a\",\n \"o\",\n \"n\",\n \"l\",\n \"t\",\n \"r\",\n \"s\",\n \"c\",\n \"d\",\n \"u\",\n \"p\",\n \"m\",\n \"g\",\n \"v\",\n \"f\",\n \"b\",\n \"z\",\n \"h\",\n \"q\",\n \"è\",\n \"à\",\n \"k\",\n \"y\",\n \"ò\",\n ],\n \"Polish\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"z\",\n \"w\",\n \"s\",\n \"c\",\n \"t\",\n \"k\",\n \"y\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"l\",\n \"j\",\n \"ł\",\n \"g\",\n \"b\",\n \"h\",\n \"ą\",\n \"ę\",\n \"ó\",\n ],\n \"Spanish\": [\n \"e\",\n \"a\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"i\",\n \"l\",\n \"d\",\n \"t\",\n \"c\",\n \"u\",\n \"m\",\n \"p\",\n \"b\",\n \"g\",\n \"v\",\n \"f\",\n \"y\",\n \"ó\",\n \"h\",\n \"q\",\n \"í\",\n \"j\",\n \"z\",\n \"á\",\n ],\n \"Russian\": [\n \"о\",\n \"а\",\n \"е\",\n \"и\",\n \"н\",\n \"с\",\n \"т\",\n \"р\",\n \"в\",\n \"л\",\n \"к\",\n \"м\",\n \"д\",\n \"п\",\n \"у\",\n \"г\",\n \"я\",\n \"ы\",\n \"з\",\n \"б\",\n \"й\",\n \"ь\",\n \"ч\",\n \"х\",\n \"ж\",\n \"ц\",\n ],\n # Jap-Kanji\n \"Japanese\": [\n \"人\",\n \"一\",\n \"大\",\n \"亅\",\n \"丁\",\n \"丨\",\n \"竹\",\n \"笑\",\n \"口\",\n \"日\",\n \"今\",\n \"二\",\n \"彳\",\n \"行\",\n \"十\",\n \"土\",\n \"丶\",\n \"寸\",\n \"寺\",\n \"時\",\n \"乙\",\n \"丿\",\n \"乂\",\n \"气\",\n \"気\",\n \"冂\",\n \"巾\",\n \"亠\",\n \"市\",\n \"目\",\n \"儿\",\n \"見\",\n \"八\",\n \"小\",\n \"凵\",\n \"県\",\n \"月\",\n \"彐\",\n \"門\",\n \"間\",\n \"木\",\n \"東\",\n \"山\",\n \"出\",\n \"本\",\n \"中\",\n \"刀\",\n \"分\",\n \"耳\",\n \"又\",\n \"取\",\n \"最\",\n \"言\",\n \"田\",\n \"心\",\n \"思\",\n \"刂\",\n \"前\",\n \"京\",\n \"尹\",\n \"事\",\n \"生\",\n \"厶\",\n \"云\",\n \"会\",\n \"未\",\n \"来\",\n \"白\",\n \"冫\",\n \"楽\",\n \"灬\",\n \"馬\",\n \"尸\",\n \"尺\",\n \"駅\",\n \"明\",\n \"耂\",\n \"者\",\n \"了\",\n \"阝\",\n \"都\",\n \"高\",\n \"卜\",\n \"占\",\n \"厂\",\n \"广\",\n \"店\",\n \"子\",\n \"申\",\n \"奄\",\n \"亻\",\n \"俺\",\n \"上\",\n \"方\",\n \"冖\",\n \"学\",\n \"衣\",\n \"艮\",\n \"食\",\n \"自\",\n ],\n # Jap-Katakana\n \"Japanese—\": [\n \"ー\",\n \"ン\",\n \"ス\",\n \"・\",\n \"ル\",\n \"ト\",\n \"リ\",\n \"イ\",\n \"ア\",\n \"ラ\",\n \"ッ\",\n \"ク\",\n 
\"ド\",\n \"シ\",\n \"レ\",\n \"ジ\",\n \"タ\",\n \"フ\",\n \"ロ\",\n \"カ\",\n \"テ\",\n \"マ\",\n \"ィ\",\n \"グ\",\n \"バ\",\n \"ム\",\n \"プ\",\n \"オ\",\n \"コ\",\n \"デ\",\n \"ニ\",\n \"ウ\",\n \"メ\",\n \"サ\",\n \"ビ\",\n \"ナ\",\n \"ブ\",\n \"ャ\",\n \"エ\",\n \"ュ\",\n \"チ\",\n \"キ\",\n \"ズ\",\n \"ダ\",\n \"パ\",\n \"ミ\",\n \"ェ\",\n \"ョ\",\n \"ハ\",\n \"セ\",\n \"ベ\",\n \"ガ\",\n \"モ\",\n \"ツ\",\n \"ネ\",\n \"ボ\",\n \"ソ\",\n \"ノ\",\n \"ァ\",\n \"ヴ\",\n \"ワ\",\n \"ポ\",\n \"ペ\",\n \"ピ\",\n \"ケ\",\n \"ゴ\",\n \"ギ\",\n \"ザ\",\n \"ホ\",\n \"ゲ\",\n \"ォ\",\n \"ヤ\",\n \"ヒ\",\n \"ユ\",\n \"ヨ\",\n \"ヘ\",\n \"ゼ\",\n \"ヌ\",\n \"ゥ\",\n \"ゾ\",\n \"ヶ\",\n \"ヂ\",\n \"ヲ\",\n \"ヅ\",\n \"ヵ\",\n \"ヱ\",\n \"ヰ\",\n \"ヮ\",\n \"ヽ\",\n \"゠\",\n \"ヾ\",\n \"ヷ\",\n \"ヿ\",\n \"ヸ\",\n \"ヹ\",\n \"ヺ\",\n ],\n # Jap-Hiragana\n \"Japanese——\": [\n \"の\",\n \"に\",\n \"る\",\n \"た\",\n \"と\",\n \"は\",\n \"し\",\n \"い\",\n \"を\",\n \"で\",\n \"て\",\n \"が\",\n \"な\",\n \"れ\",\n \"か\",\n \"ら\",\n \"さ\",\n \"っ\",\n \"り\",\n \"す\",\n \"あ\",\n \"も\",\n \"こ\",\n \"ま\",\n \"う\",\n \"く\",\n \"よ\",\n \"き\",\n \"ん\",\n \"め\",\n \"お\",\n \"け\",\n \"そ\",\n \"つ\",\n \"だ\",\n \"や\",\n \"え\",\n \"ど\",\n \"わ\",\n \"ち\",\n \"み\",\n \"せ\",\n \"じ\",\n \"ば\",\n \"へ\",\n \"び\",\n \"ず\",\n \"ろ\",\n \"ほ\",\n \"げ\",\n \"む\",\n \"べ\",\n \"ひ\",\n \"ょ\",\n \"ゆ\",\n \"ぶ\",\n \"ご\",\n \"ゃ\",\n \"ね\",\n \"ふ\",\n \"ぐ\",\n \"ぎ\",\n \"ぼ\",\n \"ゅ\",\n \"づ\",\n \"ざ\",\n \"ぞ\",\n \"ぬ\",\n \"ぜ\",\n \"ぱ\",\n \"ぽ\",\n \"ぷ\",\n \"ぴ\",\n \"ぃ\",\n \"ぁ\",\n \"ぇ\",\n \"ぺ\",\n \"ゞ\",\n \"ぢ\",\n \"ぉ\",\n \"ぅ\",\n \"ゐ\",\n \"ゝ\",\n \"ゑ\",\n \"゛\",\n \"゜\",\n \"ゎ\",\n \"ゔ\",\n \"゚\",\n \"ゟ\",\n \"゙\",\n \"ゕ\",\n \"ゖ\",\n ],\n \"Portuguese\": [\n \"a\",\n \"e\",\n \"o\",\n \"s\",\n \"i\",\n \"r\",\n \"d\",\n \"n\",\n \"t\",\n \"m\",\n \"u\",\n \"c\",\n \"l\",\n \"p\",\n \"g\",\n \"v\",\n \"b\",\n \"f\",\n \"h\",\n \"ã\",\n \"q\",\n \"é\",\n \"ç\",\n \"á\",\n \"z\",\n \"í\",\n ],\n \"Swedish\": [\n \"e\",\n \"a\",\n \"n\",\n \"r\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"d\",\n \"o\",\n \"m\",\n \"k\",\n \"g\",\n \"v\",\n \"h\",\n \"f\",\n \"u\",\n \"p\",\n \"ä\",\n \"c\",\n \"b\",\n \"ö\",\n \"å\",\n \"y\",\n \"j\",\n \"x\",\n ],\n \"Chinese\": [\n \"的\",\n \"一\",\n \"是\",\n \"不\",\n \"了\",\n \"在\",\n \"人\",\n \"有\",\n \"我\",\n \"他\",\n \"这\",\n \"个\",\n \"们\",\n \"中\",\n \"来\",\n \"上\",\n \"大\",\n \"为\",\n \"和\",\n \"国\",\n \"地\",\n \"到\",\n \"以\",\n \"说\",\n \"时\",\n \"要\",\n \"就\",\n \"出\",\n \"会\",\n \"可\",\n \"也\",\n \"你\",\n \"对\",\n \"生\",\n \"能\",\n \"而\",\n \"子\",\n \"那\",\n \"得\",\n \"于\",\n \"着\",\n \"下\",\n \"自\",\n \"之\",\n \"年\",\n \"过\",\n \"发\",\n \"后\",\n \"作\",\n \"里\",\n \"用\",\n \"道\",\n \"行\",\n \"所\",\n \"然\",\n \"家\",\n \"种\",\n \"事\",\n \"成\",\n \"方\",\n \"多\",\n \"经\",\n \"么\",\n \"去\",\n \"法\",\n \"学\",\n \"如\",\n \"都\",\n \"同\",\n \"现\",\n \"当\",\n \"没\",\n \"动\",\n \"面\",\n \"起\",\n \"看\",\n \"定\",\n \"天\",\n \"分\",\n \"还\",\n \"进\",\n \"好\",\n \"小\",\n \"部\",\n \"其\",\n \"些\",\n \"主\",\n \"样\",\n \"理\",\n \"心\",\n \"她\",\n \"本\",\n \"前\",\n \"开\",\n \"但\",\n \"因\",\n \"只\",\n \"从\",\n \"想\",\n \"实\",\n ],\n \"Ukrainian\": [\n \"о\",\n \"а\",\n \"н\",\n \"і\",\n \"и\",\n \"р\",\n \"в\",\n \"т\",\n \"е\",\n \"с\",\n \"к\",\n \"л\",\n \"у\",\n \"д\",\n \"м\",\n \"п\",\n \"з\",\n \"я\",\n \"ь\",\n \"б\",\n \"г\",\n \"й\",\n \"ч\",\n \"х\",\n \"ц\",\n \"ї\",\n ],\n \"Norwegian\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"s\",\n \"i\",\n \"o\",\n \"l\",\n \"d\",\n \"g\",\n \"k\",\n \"m\",\n \"v\",\n \"f\",\n \"p\",\n \"u\",\n \"b\",\n \"h\",\n \"å\",\n \"y\",\n \"j\",\n 
\"ø\",\n \"c\",\n \"æ\",\n \"w\",\n ],\n \"Finnish\": [\n \"a\",\n \"i\",\n \"n\",\n \"t\",\n \"e\",\n \"s\",\n \"l\",\n \"o\",\n \"u\",\n \"k\",\n \"ä\",\n \"m\",\n \"r\",\n \"v\",\n \"j\",\n \"h\",\n \"p\",\n \"y\",\n \"d\",\n \"ö\",\n \"g\",\n \"c\",\n \"b\",\n \"f\",\n \"w\",\n \"z\",\n ],\n \"Vietnamese\": [\n \"n\",\n \"h\",\n \"t\",\n \"i\",\n \"c\",\n \"g\",\n \"a\",\n \"o\",\n \"u\",\n \"m\",\n \"l\",\n \"r\",\n \"à\",\n \"đ\",\n \"s\",\n \"e\",\n \"v\",\n \"p\",\n \"b\",\n \"y\",\n \"ư\",\n \"d\",\n \"á\",\n \"k\",\n \"ộ\",\n \"ế\",\n ],\n \"Czech\": [\n \"o\",\n \"e\",\n \"a\",\n \"n\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"v\",\n \"r\",\n \"k\",\n \"d\",\n \"u\",\n \"m\",\n \"p\",\n \"í\",\n \"c\",\n \"h\",\n \"z\",\n \"á\",\n \"y\",\n \"j\",\n \"b\",\n \"ě\",\n \"é\",\n \"ř\",\n ],\n \"Hungarian\": [\n \"e\",\n \"a\",\n \"t\",\n \"l\",\n \"s\",\n \"n\",\n \"k\",\n \"r\",\n \"i\",\n \"o\",\n \"z\",\n \"á\",\n \"é\",\n \"g\",\n \"m\",\n \"b\",\n \"y\",\n \"v\",\n \"d\",\n \"h\",\n \"u\",\n \"p\",\n \"j\",\n \"ö\",\n \"f\",\n \"c\",\n ],\n \"Korean\": [\n \"이\",\n \"다\",\n \"에\",\n \"의\",\n \"는\",\n \"로\",\n \"하\",\n \"을\",\n \"가\",\n \"고\",\n \"지\",\n \"서\",\n \"한\",\n \"은\",\n \"기\",\n \"으\",\n \"년\",\n \"대\",\n \"사\",\n \"시\",\n \"를\",\n \"리\",\n \"도\",\n \"인\",\n \"스\",\n \"일\",\n ],\n \"Indonesian\": [\n \"a\",\n \"n\",\n \"e\",\n \"i\",\n \"r\",\n \"t\",\n \"u\",\n \"s\",\n \"d\",\n \"k\",\n \"m\",\n \"l\",\n \"g\",\n \"p\",\n \"b\",\n \"o\",\n \"h\",\n \"y\",\n \"j\",\n \"c\",\n \"w\",\n \"f\",\n \"v\",\n \"z\",\n \"x\",\n \"q\",\n ],\n \"Turkish\": [\n \"a\",\n \"e\",\n \"i\",\n \"n\",\n \"r\",\n \"l\",\n \"ı\",\n \"k\",\n \"d\",\n \"t\",\n \"s\",\n \"m\",\n \"y\",\n \"u\",\n \"o\",\n \"b\",\n \"ü\",\n \"ş\",\n \"v\",\n \"g\",\n \"z\",\n \"h\",\n \"c\",\n \"p\",\n \"ç\",\n \"ğ\",\n ],\n \"Romanian\": [\n \"e\",\n \"i\",\n \"a\",\n \"r\",\n \"n\",\n \"t\",\n \"u\",\n \"l\",\n \"o\",\n \"c\",\n \"s\",\n \"d\",\n \"p\",\n \"m\",\n \"ă\",\n \"f\",\n \"v\",\n \"î\",\n \"g\",\n \"b\",\n \"ș\",\n \"ț\",\n \"z\",\n \"h\",\n \"â\",\n \"j\",\n ],\n \"Farsi\": [\n \"ا\",\n \"ی\",\n \"ر\",\n \"د\",\n \"ن\",\n \"ه\",\n \"و\",\n \"م\",\n \"ت\",\n \"ب\",\n \"س\",\n \"ل\",\n \"ک\",\n \"ش\",\n \"ز\",\n \"ف\",\n \"گ\",\n \"ع\",\n \"خ\",\n \"ق\",\n \"ج\",\n \"آ\",\n \"پ\",\n \"ح\",\n \"ط\",\n \"ص\",\n ],\n \"Arabic\": [\n \"ا\",\n \"ل\",\n \"ي\",\n \"م\",\n \"و\",\n \"ن\",\n \"ر\",\n \"ت\",\n \"ب\",\n \"ة\",\n \"ع\",\n \"د\",\n \"س\",\n \"ف\",\n \"ه\",\n \"ك\",\n \"ق\",\n \"أ\",\n \"ح\",\n \"ج\",\n \"ش\",\n \"ط\",\n \"ص\",\n \"ى\",\n \"خ\",\n \"إ\",\n ],\n \"Danish\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"i\",\n \"s\",\n \"d\",\n \"l\",\n \"o\",\n \"g\",\n \"m\",\n \"k\",\n \"f\",\n \"v\",\n \"u\",\n \"b\",\n \"h\",\n \"p\",\n \"å\",\n \"y\",\n \"ø\",\n \"æ\",\n \"c\",\n \"j\",\n \"w\",\n ],\n \"Serbian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"р\",\n \"с\",\n \"у\",\n \"т\",\n \"к\",\n \"ј\",\n \"в\",\n \"д\",\n \"м\",\n \"п\",\n \"л\",\n \"г\",\n \"з\",\n \"б\",\n \"a\",\n \"i\",\n \"e\",\n \"o\",\n \"n\",\n \"ц\",\n \"ш\",\n ],\n \"Lithuanian\": [\n \"i\",\n \"a\",\n \"s\",\n \"o\",\n \"r\",\n \"e\",\n \"t\",\n \"n\",\n \"u\",\n \"k\",\n \"m\",\n \"l\",\n \"p\",\n \"v\",\n \"d\",\n \"j\",\n \"g\",\n \"ė\",\n \"b\",\n \"y\",\n \"ų\",\n \"š\",\n \"ž\",\n \"c\",\n \"ą\",\n \"į\",\n ],\n \"Slovene\": [\n \"e\",\n \"a\",\n \"i\",\n \"o\",\n \"n\",\n \"r\",\n \"s\",\n \"l\",\n \"t\",\n \"j\",\n \"v\",\n \"k\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"z\",\n \"b\",\n 
\"g\",\n \"h\",\n \"č\",\n \"c\",\n \"š\",\n \"ž\",\n \"f\",\n \"y\",\n ],\n \"Slovak\": [\n \"o\",\n \"a\",\n \"e\",\n \"n\",\n \"i\",\n \"r\",\n \"v\",\n \"t\",\n \"s\",\n \"l\",\n \"k\",\n \"d\",\n \"m\",\n \"p\",\n \"u\",\n \"c\",\n \"h\",\n \"j\",\n \"b\",\n \"z\",\n \"á\",\n \"y\",\n \"ý\",\n \"í\",\n \"č\",\n \"é\",\n ],\n \"Hebrew\": [\n \"י\",\n \"ו\",\n \"ה\",\n \"ל\",\n \"ר\",\n \"ב\",\n \"ת\",\n \"מ\",\n \"א\",\n \"ש\",\n \"נ\",\n \"ע\",\n \"ם\",\n \"ד\",\n \"ק\",\n \"ח\",\n \"פ\",\n \"ס\",\n \"כ\",\n \"ג\",\n \"ט\",\n \"צ\",\n \"ן\",\n \"ז\",\n \"ך\",\n ],\n \"Bulgarian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"с\",\n \"в\",\n \"л\",\n \"к\",\n \"д\",\n \"п\",\n \"м\",\n \"з\",\n \"г\",\n \"я\",\n \"ъ\",\n \"у\",\n \"б\",\n \"ч\",\n \"ц\",\n \"й\",\n \"ж\",\n \"щ\",\n \"х\",\n ],\n \"Croatian\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"j\",\n \"s\",\n \"t\",\n \"u\",\n \"k\",\n \"l\",\n \"v\",\n \"d\",\n \"m\",\n \"p\",\n \"g\",\n \"z\",\n \"b\",\n \"c\",\n \"č\",\n \"h\",\n \"š\",\n \"ž\",\n \"ć\",\n \"f\",\n ],\n \"Hindi\": [\n \"क\",\n \"र\",\n \"स\",\n \"न\",\n \"त\",\n \"म\",\n \"ह\",\n \"प\",\n \"य\",\n \"ल\",\n \"व\",\n \"ज\",\n \"द\",\n \"ग\",\n \"ब\",\n \"श\",\n \"ट\",\n \"अ\",\n \"ए\",\n \"थ\",\n \"भ\",\n \"ड\",\n \"च\",\n \"ध\",\n \"ष\",\n \"इ\",\n ],\n \"Estonian\": [\n \"a\",\n \"i\",\n \"e\",\n \"s\",\n \"t\",\n \"l\",\n \"u\",\n \"n\",\n \"o\",\n \"k\",\n \"r\",\n \"d\",\n \"m\",\n \"v\",\n \"g\",\n \"p\",\n \"j\",\n \"h\",\n \"ä\",\n \"b\",\n \"õ\",\n \"ü\",\n \"f\",\n \"c\",\n \"ö\",\n \"y\",\n ],\n \"Thai\": [\n \"า\",\n \"น\",\n \"ร\",\n \"อ\",\n \"ก\",\n \"เ\",\n \"ง\",\n \"ม\",\n \"ย\",\n \"ล\",\n \"ว\",\n \"ด\",\n \"ท\",\n \"ส\",\n \"ต\",\n \"ะ\",\n \"ป\",\n \"บ\",\n \"ค\",\n \"ห\",\n \"แ\",\n \"จ\",\n \"พ\",\n \"ช\",\n \"ข\",\n \"ใ\",\n ],\n \"Greek\": [\n \"α\",\n \"τ\",\n \"ο\",\n \"ι\",\n \"ε\",\n \"ν\",\n \"ρ\",\n \"σ\",\n \"κ\",\n \"η\",\n \"π\",\n \"ς\",\n \"υ\",\n \"μ\",\n \"λ\",\n \"ί\",\n \"ό\",\n \"ά\",\n \"γ\",\n \"έ\",\n \"δ\",\n \"ή\",\n \"ω\",\n \"χ\",\n \"θ\",\n \"ύ\",\n ],\n \"Tamil\": [\n \"க\",\n \"த\",\n \"ப\",\n \"ட\",\n \"ர\",\n \"ம\",\n \"ல\",\n \"ன\",\n \"வ\",\n \"ற\",\n \"ய\",\n \"ள\",\n \"ச\",\n \"ந\",\n \"இ\",\n \"ண\",\n \"அ\",\n \"ஆ\",\n \"ழ\",\n \"ங\",\n \"எ\",\n \"உ\",\n \"ஒ\",\n \"ஸ\",\n ],\n \"Kazakh\": [\n \"а\",\n \"ы\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"л\",\n \"і\",\n \"д\",\n \"с\",\n \"м\",\n \"қ\",\n \"к\",\n \"о\",\n \"б\",\n \"и\",\n \"у\",\n \"ғ\",\n \"ж\",\n \"ң\",\n \"з\",\n \"ш\",\n \"й\",\n \"п\",\n \"г\",\n \"ө\",\n ],\n}" }, { "identifier": "KO_NAMES", "path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/constant.py", "snippet": "KO_NAMES: Set[str] = {\"johab\", \"cp949\", \"euc_kr\"}" }, { "identifier": "LANGUAGE_SUPPORTED_COUNT", "path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/constant.py", "snippet": "LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES)" }, { "identifier": "TOO_SMALL_SEQUENCE", "path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/constant.py", "snippet": "TOO_SMALL_SEQUENCE: int = 32" }, { "identifier": "ZH_NAMES", "path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/constant.py", "snippet": "ZH_NAMES: Set[str] = {\"big5\", \"cp950\", \"big5hkscs\", \"hz\"}" }, { "identifier": "is_suspiciously_successive_range", "path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/md.py", "snippet": "@lru_cache(maxsize=1024)\ndef is_suspiciously_successive_range(\n 
unicode_range_a: Optional[str], unicode_range_b: Optional[str]\n) -> bool:\n \"\"\"\n Determine if two Unicode range seen next to each other can be considered as suspicious.\n \"\"\"\n if unicode_range_a is None or unicode_range_b is None:\n return True\n\n if unicode_range_a == unicode_range_b:\n return False\n\n if \"Latin\" in unicode_range_a and \"Latin\" in unicode_range_b:\n return False\n\n if \"Emoticons\" in unicode_range_a or \"Emoticons\" in unicode_range_b:\n return False\n\n # Latin characters can be accompanied with a combining diacritical mark\n # eg. Vietnamese.\n if (\"Latin\" in unicode_range_a or \"Latin\" in unicode_range_b) and (\n \"Combining\" in unicode_range_a or \"Combining\" in unicode_range_b\n ):\n return False\n\n keywords_range_a, keywords_range_b = unicode_range_a.split(\n \" \"\n ), unicode_range_b.split(\" \")\n\n for el in keywords_range_a:\n if el in UNICODE_SECONDARY_RANGE_KEYWORD:\n continue\n if el in keywords_range_b:\n return False\n\n # Japanese Exception\n range_a_jp_chars, range_b_jp_chars = (\n unicode_range_a\n in (\n \"Hiragana\",\n \"Katakana\",\n ),\n unicode_range_b in (\"Hiragana\", \"Katakana\"),\n )\n if (range_a_jp_chars or range_b_jp_chars) and (\n \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b\n ):\n return False\n if range_a_jp_chars and range_b_jp_chars:\n return False\n\n if \"Hangul\" in unicode_range_a or \"Hangul\" in unicode_range_b:\n if \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b:\n return False\n if unicode_range_a == \"Basic Latin\" or unicode_range_b == \"Basic Latin\":\n return False\n\n # Chinese/Japanese use dedicated range for punctuation and/or separators.\n if (\"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b) or (\n unicode_range_a in [\"Katakana\", \"Hiragana\"]\n and unicode_range_b in [\"Katakana\", \"Hiragana\"]\n ):\n if \"Punctuation\" in unicode_range_a or \"Punctuation\" in unicode_range_b:\n return False\n if \"Forms\" in unicode_range_a or \"Forms\" in unicode_range_b:\n return False\n if unicode_range_a == \"Basic Latin\" or unicode_range_b == \"Basic Latin\":\n return False\n\n return True" }, { "identifier": "CoherenceMatches", "path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/models.py", "snippet": "class CharsetMatch:\nclass CharsetMatches:\nclass CliDetectionResult:\n def __init__(\n self,\n payload: bytes,\n guessed_encoding: str,\n mean_mess_ratio: float,\n has_sig_or_bom: bool,\n languages: \"CoherenceMatches\",\n decoded_payload: Optional[str] = None,\n ):\n def __eq__(self, other: object) -> bool:\n def __lt__(self, other: object) -> bool:\n def multi_byte_usage(self) -> float:\n def __str__(self) -> str:\n def __repr__(self) -> str:\n def add_submatch(self, other: \"CharsetMatch\") -> None:\n def encoding(self) -> str:\n def encoding_aliases(self) -> List[str]:\n def bom(self) -> bool:\n def byte_order_mark(self) -> bool:\n def languages(self) -> List[str]:\n def language(self) -> str:\n def chaos(self) -> float:\n def coherence(self) -> float:\n def percent_chaos(self) -> float:\n def percent_coherence(self) -> float:\n def raw(self) -> bytes:\n def submatch(self) -> List[\"CharsetMatch\"]:\n def has_submatch(self) -> bool:\n def alphabets(self) -> List[str]:\n def could_be_from_charset(self) -> List[str]:\n def output(self, encoding: str = \"utf_8\") -> bytes:\n def fingerprint(self) -> str:\n def __init__(self, results: Optional[List[CharsetMatch]] = None):\n def __iter__(self) -> Iterator[CharsetMatch]:\n def __getitem__(self, item: 
Union[int, str]) -> CharsetMatch:\n def __len__(self) -> int:\n def __bool__(self) -> bool:\n def append(self, item: CharsetMatch) -> None:\n def best(self) -> Optional[\"CharsetMatch\"]:\n def first(self) -> Optional[\"CharsetMatch\"]:\n def __init__(\n self,\n path: str,\n encoding: Optional[str],\n encoding_aliases: List[str],\n alternative_encodings: List[str],\n language: str,\n alphabets: List[str],\n has_sig_or_bom: bool,\n chaos: float,\n coherence: float,\n unicode_path: Optional[str],\n is_preferred: bool,\n ):\n def __dict__(self) -> Dict[str, Any]: # type: ignore\n def to_json(self) -> str:" }, { "identifier": "is_accentuated", "path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_accentuated(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return (\n \"WITH GRAVE\" in description\n or \"WITH ACUTE\" in description\n or \"WITH CEDILLA\" in description\n or \"WITH DIAERESIS\" in description\n or \"WITH CIRCUMFLEX\" in description\n or \"WITH TILDE\" in description\n )" }, { "identifier": "is_latin", "path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_latin(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return \"LATIN\" in description" }, { "identifier": "is_multi_byte_encoding", "path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=128)\ndef is_multi_byte_encoding(name: str) -> bool:\n \"\"\"\n Verify is a specific encoding is a multi byte one based on it IANA name\n \"\"\"\n return name in {\n \"utf_8\",\n \"utf_8_sig\",\n \"utf_16\",\n \"utf_16_be\",\n \"utf_16_le\",\n \"utf_32\",\n \"utf_32_le\",\n \"utf_32_be\",\n \"utf_7\",\n } or issubclass(\n importlib.import_module(\"encodings.{}\".format(name)).IncrementalDecoder,\n MultibyteIncrementalDecoder,\n )" }, { "identifier": "is_unicode_range_secondary", "path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))\ndef is_unicode_range_secondary(range_name: str) -> bool:\n return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)" }, { "identifier": "unicode_range", "path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef unicode_range(character: str) -> Optional[str]:\n \"\"\"\n Retrieve the Unicode range official name from a single character.\n \"\"\"\n character_ord: int = ord(character)\n\n for range_name, ord_range in UNICODE_RANGES_COMBINED.items():\n if character_ord in ord_range:\n return range_name\n\n return None" } ]
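Several helpers listed in the context above, such as is_latin and is_accentuated, classify a single character from its official Unicode name. A standalone sketch of the same idea using only the standard library (the function names below are illustrative, not the library's API):

import unicodedata


def looks_latin(ch: str) -> bool:
    # Mirrors is_latin: inspect the official Unicode name of the character.
    try:
        return "LATIN" in unicodedata.name(ch)
    except ValueError:  # character has no name (e.g. control characters)
        return False


def looks_accentuated(ch: str) -> bool:
    # Mirrors is_accentuated: look for diacritic keywords in the Unicode name.
    try:
        name = unicodedata.name(ch)
    except ValueError:
        return False
    keywords = ("WITH GRAVE", "WITH ACUTE", "WITH CEDILLA",
                "WITH DIAERESIS", "WITH CIRCUMFLEX", "WITH TILDE")
    return any(k in name for k in keywords)


print(looks_latin("é"), looks_accentuated("é"))  # True True
print(looks_latin("д"), looks_accentuated("д"))  # False False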
import importlib
from codecs import IncrementalDecoder
from collections import Counter
from functools import lru_cache
from typing import Counter as TypeCounter, Dict, List, Optional, Tuple

from .constant import (
    FREQUENCIES,
    KO_NAMES,
    LANGUAGE_SUPPORTED_COUNT,
    TOO_SMALL_SEQUENCE,
    ZH_NAMES,
)
from .md import is_suspiciously_successive_range
from .models import CoherenceMatches
from .utils import (
    is_accentuated,
    is_latin,
    is_multi_byte_encoding,
    is_unicode_range_secondary,
    unicode_range,
)
11,342
if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. """ layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if (
def encoding_unicode_range(iana_name: str) -> List[str]: """ Return associated unicode ranges in a single byte code page. """ if is_multi_byte_encoding(iana_name): raise IOError("Function not supported on multi-byte code page") decoder = importlib.import_module( "encodings.{}".format(iana_name) ).IncrementalDecoder p: IncrementalDecoder = decoder(errors="ignore") seen_ranges: Dict[str, int] = {} character_count: int = 0 for i in range(0x40, 0xFF): chunk: str = p.decode(bytes([i])) if chunk: character_range: Optional[str] = unicode_range(chunk) if character_range is None: continue if is_unicode_range_secondary(character_range) is False: if character_range not in seen_ranges: seen_ranges[character_range] = 0 seen_ranges[character_range] += 1 character_count += 1 return sorted( [ character_range for character_range in seen_ranges if seen_ranges[character_range] / character_count >= 0.15 ] ) def unicode_range_languages(primary_range: str) -> List[str]: """ Return inferred languages used with a unicode range. """ languages: List[str] = [] for language, characters in FREQUENCIES.items(): for character in characters: if unicode_range(character) == primary_range: languages.append(language) break return languages @lru_cache() def encoding_languages(iana_name: str) -> List[str]: """ Single-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ unicode_ranges: List[str] = encoding_unicode_range(iana_name) primary_range: Optional[str] = None for specified_range in unicode_ranges: if "Latin" not in specified_range: primary_range = specified_range break if primary_range is None: return ["Latin Based"] return unicode_range_languages(primary_range) @lru_cache() def mb_encoding_languages(iana_name: str) -> List[str]: """ Multi-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ if ( iana_name.startswith("shift_") or iana_name.startswith("iso2022_jp") or iana_name.startswith("euc_j") or iana_name == "cp932" ): return ["Japanese"] if iana_name.startswith("gb") or iana_name in ZH_NAMES: return ["Chinese"] if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: return ["Korean"] return [] @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) def get_target_features(language: str) -> Tuple[bool, bool]: """ Determine main aspects from a supported language if it contains accents and if is pure Latin. """ target_have_accents: bool = False target_pure_latin: bool = True for character in FREQUENCIES[language]: if not target_have_accents and is_accentuated(character): target_have_accents = True if target_pure_latin and is_latin(character) is False: target_pure_latin = False return target_have_accents, target_pure_latin def alphabet_languages( characters: List[str], ignore_non_latin: bool = False ) -> List[str]: """ Return associated languages associated to given characters. 
""" languages: List[Tuple[str, float]] = [] source_have_accents = any(is_accentuated(character) for character in characters) for language, language_characters in FREQUENCIES.items(): target_have_accents, target_pure_latin = get_target_features(language) if ignore_non_latin and target_pure_latin is False: continue if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. 
""" layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if (
is_suspiciously_successive_range(discovered_range, character_range)
5
2023-10-23 18:09:28+00:00
16k
zju3dv/nr_in_a_room
data_gen/batch_real_scene_neural_render.py
[ { "identifier": "read_json", "path": "utils/util.py", "snippet": "def read_json(fname):\n fname = Path(fname)\n with fname.open(\"rt\") as handle:\n return json.load(handle, object_hook=OrderedDict)" }, { "identifier": "read_yaml", "path": "utils/util.py", "snippet": "def read_yaml(fname):\n with open(fname, \"r\") as stream:\n return yaml.safe_load(stream)" }, { "identifier": "RoomOptimizer", "path": "optim/room_optimizer.py", "snippet": "class RoomOptimizer:\n def __init__(\n self,\n scale_factor: float,\n bg_scale_factor: float,\n bg_scene_center: list,\n img_wh: list,\n near: float,\n far: float,\n chunk: int,\n model_ckpt_path_dict: Dict[str, Any],\n config=None,\n scale_factor_dict: Dict[str, Any] = {},\n scene_info_path: str = None,\n scene_info_json_path: str = None,\n model_type=\"NeuS\",\n N_samples: int = 64,\n N_importance: int = 128,\n relation_info: Dict[str, Any] = {},\n output_path: str = None,\n prefix: str = \"\",\n active_instance_id: list = [46, 4, 9, 102],\n virtual_instance_id: list = [], # specific for edit (insert virtual to real) mode\n filter_door_and_window: bool = True,\n lr: float = 1e-2,\n N_optim_step: int = 500,\n adjust_lr_per_step: int = 150,\n optim_batch_size: int = 1024,\n use_amp: bool = False,\n extract_obj_bbox_from_neural_model: bool = False,\n ig_data_base_dir: str = \"data/ig_dataset_v1.0.1/\",\n mask_per_object: bool = False,\n bbox_ray_intersect: bool = True,\n bbox_enlarge: float = 0.1,\n optimize_light_env: bool = True,\n optimize_appearance_code: bool = False,\n use_light_from_image_attr: bool = False,\n use_appearance_from_image_attr: bool = False,\n optimize_option: list = [\n \"photometric_loss\",\n \"perceptual_loss\",\n \"z_axis_align_loss\",\n \"object_room_wall_attach\",\n \"object_room_floor_attach\",\n \"physical_violation\",\n \"object_object_attach\",\n ],\n ):\n # load config\n self.scene_info_path = scene_info_path\n self.scale_factor = scale_factor\n self.scale_factor_dict = scale_factor_dict\n self.bg_scale_factor = bg_scale_factor\n self.bg_scene_center = np.array(bg_scene_center)\n self.ig_data_base_dir = ig_data_base_dir\n self.mask_per_object = mask_per_object\n self.bbox_ray_intersect = bbox_ray_intersect\n self.bbox_enlarge = bbox_enlarge\n self.virtual_instance_id = virtual_instance_id\n\n self.img_wh = img_wh\n self.w = img_wh[0]\n self.h = img_wh[1]\n self.near = near\n self.far = far\n self.N_importance = N_importance\n self.N_samples = N_samples\n self.chunk = chunk\n self.lr = lr\n self.N_optim_step = N_optim_step\n self.adjust_lr_per_step = adjust_lr_per_step\n self.optim_batch_size = optim_batch_size\n self.use_amp = use_amp\n self.optimize_light_env = optimize_light_env\n self.optimize_appearance_code = optimize_appearance_code\n self.optimize_option = optimize_option\n self.config = config\n\n self.use_light_from_image_attr = use_light_from_image_attr\n if self.use_light_from_image_attr:\n print(\n \"WARNING: self.use_light_from_image_attr = True, using hard coded light env.\"\n )\n self.hard_coded_light_id = 0 # just for compatibility\n # self.hard_coded_light_id = 9 # probe_03 in 10 HDR multi_light training\n\n self.use_appearance_from_image_attr = use_appearance_from_image_attr\n if self.use_appearance_from_image_attr:\n print(\n \"WARNING: self.use_appearance_from_image_attr = True, using first frame appearance code.\"\n )\n self.hard_coded_appearance_frame_id = 0\n\n self.optimize_exposure = \"optimize_exposure\" in self.optimize_option\n\n # laod scene info\n if scene_info_json_path is None:\n 
scene_info_json_path = os.path.join(scene_info_path, \"data.json\")\n self.scene_meta = read_json(scene_info_json_path)\n\n self.active_instance_id = active_instance_id\n if filter_door_and_window:\n self.filter_door_and_window()\n\n self.relation_info = relation_info\n\n self.model_type = model_type\n # self.load_model(\n # model_type, model_ckpt_path_dict[\"obj\"], model_ckpt_path_dict[\"bg\"]\n # )\n self.load_model_from_dict_path(model_type, model_ckpt_path_dict)\n\n self.reset_optimizable_parameters()\n\n if extract_obj_bbox_from_neural_model:\n self.extract_bounding_boxes_from_neural_model()\n\n if self.bbox_ray_intersect:\n self.prepare_bbox_ray_helper()\n\n self.set_output_path(output_path, prefix)\n\n print(\"RoomOptimizer initialize finished.\")\n\n def load_model_from_dict_path(self, model_type, model_ckpt_path_dict):\n assert model_type == \"NeuS\"\n self.models = {}\n self.image_attrs = {}\n\n # avoid duplicate loading\n self.models_cache = {}\n self.image_attrs_cache = {}\n\n print(\"loading model with instance_id\", self.active_instance_id)\n\n # print(model_ckpt_path_dict)\n for obj_id in self.active_instance_id:\n # identify ckpt_path\n if str(obj_id) in model_ckpt_path_dict:\n ckpt_info = model_ckpt_path_dict[str(obj_id)]\n elif obj_id == 0:\n assert (\n \"bg\" in model_ckpt_path_dict or \"0\" in model_ckpt_path_dict\n ), \"model_ckpt_path_dict missing background 'bg' or '0' ckpt\"\n ckpt_info = model_ckpt_path_dict.get(\"bg\", model_ckpt_path_dict[\"0\"])\n else:\n print(\n f\"Cannot find specific model for obj_id = {obj_id}, \\\n maybe config file is not compatible with given active_instance_id.\"\n )\n ckpt_info = model_ckpt_path_dict[\"obj\"]\n # load with cache\n ckpt_path, neus_conf = ckpt_info[\"path\"], ckpt_info[\"neus_conf\"]\n if ckpt_info not in self.models_cache:\n (\n self.models_cache[ckpt_path],\n self.image_attrs_cache[ckpt_path],\n ) = self.load_model_neus(ckpt_path, obj_id, neus_conf)\n self.models[f\"neus_{obj_id}\"] = self.models_cache[ckpt_path]\n self.image_attrs[str(obj_id)] = self.image_attrs_cache[ckpt_path]\n\n def load_model_nerf(self, ckpt_path):\n # TODO(ybbbbt): fix hard coding\n conf = {\n \"N_max_objs\": 128,\n \"N_obj_embedding\": 64,\n }\n nerf_coarse = NeRF_Object(conf)\n nerf_fine = NeRF_Object(conf)\n image_attributes = ImageAttributes(conf)\n load_ckpt(nerf_coarse, ckpt_path, model_name=\"nerf_coarse\")\n load_ckpt(nerf_fine, ckpt_path, model_name=\"nerf_fine\")\n load_ckpt(image_attributes, ckpt_path, model_name=\"image_attributes\")\n\n nerf_coarse = nerf_coarse.cuda().eval()\n nerf_fine = nerf_fine.cuda().eval()\n image_attributes = image_attributes.cuda().eval()\n\n models = {\n \"coarse\": nerf_coarse,\n \"fine\": nerf_fine,\n }\n\n embedding_xyz = Embedding(3, 10)\n embedding_dir = Embedding(3, 4)\n embeddings = {\n \"xyz\": embedding_xyz,\n \"dir\": embedding_dir,\n }\n return models, embeddings, image_attributes\n\n def load_model_neus(self, ckpt_path, obj_id, config_path=\"config/neus.yaml\"):\n conf = {\n \"model\": {\n \"N_max_objs\": 128,\n \"N_obj_embedding\": 64,\n },\n }\n if self.optimize_light_env:\n # conf[\"model\"].update({\"N_max_lights\": 128, \"N_light_embedding\": 16})\n conf[\"model\"].update({\"N_max_lights\": 1024, \"N_light_embedding\": 16})\n\n if self.optimize_appearance_code and obj_id not in self.virtual_instance_id:\n conf[\"model\"].update(\n {\"N_max_appearance_frames\": 10000, \"N_appearance_embedding\": 16}\n )\n\n neus, render_kwargs_train, render_kwargs_test = get_model_neus(\n 
config_path=config_path, need_trainer=False, extra_conf=conf\n )\n self.render_kwargs_neus = render_kwargs_test\n image_attributes = ImageAttributes(conf[\"model\"])\n\n print(ckpt_path)\n load_ckpt(neus, ckpt_path, model_name=\"neus\")\n load_ckpt(image_attributes, ckpt_path, model_name=\"image_attributes\")\n\n if self.config is not None and (\n str(obj_id) in self.config.get(\"map_virtual_to_local\", {})\n ):\n # image_attributes.embedding_instance\n real_id_in_ckpt = self.config.map_virtual_to_local[str(obj_id)]\n image_attributes.embedding_instance.weight.requires_grad = False\n image_attributes.embedding_instance.weight[\n obj_id\n ] = image_attributes.embedding_instance.weight[real_id_in_ckpt]\n # ipdb.set_trace()\n\n neus.cuda().eval()\n image_attributes.cuda().eval()\n return neus, image_attributes\n\n def reset_optimizable_parameters(self):\n self.params = []\n self.relation_info = {}\n if self.optimize_light_env:\n self.initialize_light_code()\n\n if self.optimize_appearance_code:\n self.initialize_appearance_code()\n\n if self.optimize_exposure:\n self.initialize_autoexposure()\n\n def save_optimizable_parameters(self, path):\n all_param_dict = {}\n # all_param_dict[\"params\"] = self.params\n all_param_dict[\"relation_info\"] = self.relation_info\n all_param_dict[\"object_pose_dict\"] = copy.deepcopy(self.object_pose_dict)\n all_param_dict[\"active_instance_id\"] = copy.deepcopy(self.active_instance_id)\n if self.optimize_light_env:\n all_param_dict[\"light_code\"] = copy.deepcopy(self.light_code_dict)\n if self.optimize_appearance_code:\n all_param_dict[\"appearance_code\"] = copy.deepcopy(self.appearance_code_dict)\n if self.optimize_exposure:\n all_param_dict[\"exposure\"] = copy.deepcopy(self.autoexposure_param)\n torch.save(all_param_dict, path)\n\n def load_optimizable_parameters(self, path):\n all_param_dict = torch.load(path)\n # self.params = all_param_dict[\"params\"]\n self.relation_info = all_param_dict[\"relation_info\"]\n if len(self.virtual_instance_id) == 0: # not overwrite in edit mode\n self.active_instance_id = all_param_dict[\"active_instance_id\"]\n\n def to_gpu(code_dict):\n for k, v in code_dict.items():\n if isinstance(v, torch.Tensor):\n code_dict[k] = v.cuda()\n elif isinstance(v, dict):\n for k2, v2 in v.items():\n if isinstance(v2, torch.Tensor):\n code_dict[k][k2] = v2.cuda()\n\n if len(self.virtual_instance_id) == 0: # not modify edit mode pose\n if hasattr(self, \"object_pose_dict\"):\n self.object_pose_dict.update(all_param_dict[\"object_pose_dict\"])\n else:\n self.object_pose_dict = all_param_dict[\"object_pose_dict\"]\n if self.optimize_light_env:\n self.light_code_dict = all_param_dict[\"light_code\"]\n to_gpu(self.light_code_dict)\n if self.optimize_appearance_code:\n self.appearance_code_dict = all_param_dict[\"appearance_code\"]\n to_gpu(self.appearance_code_dict)\n if self.optimize_exposure and \"exposure\" in all_param_dict:\n self.autoexposure_param = all_param_dict[\"exposure\"]\n to_gpu(self.autoexposure_param)\n # ipdb.set_trace()\n\n def interpolate_light_env_from_states(self, path1, path2, interp):\n all_param_dict_1 = torch.load(path1)\n all_param_dict_2 = torch.load(path2)\n\n # self.params = all_param_dict[\"params\"]\n def to_gpu(code_dict):\n for k, v in code_dict.items():\n if isinstance(v, torch.Tensor):\n code_dict[k] = v.cuda()\n elif isinstance(v, dict):\n for k2, v2 in v.items():\n if isinstance(v2, torch.Tensor):\n code_dict[k][k2] = v2.cuda()\n\n if self.optimize_light_env:\n light_code_dict_1 = 
all_param_dict_1[\"light_code\"]\n light_code_dict_2 = all_param_dict_2[\"light_code\"]\n for k, v in self.light_code_dict.items():\n self.light_code_dict[k] = light_code_dict_1[\n k\n ] * interp + light_code_dict_2[k] * (1 - interp)\n to_gpu(self.light_code_dict)\n if self.optimize_appearance_code:\n appearance_code_dict_1 = all_param_dict_1[\"appearance_code\"]\n appearance_code_dict_2 = all_param_dict_2[\"appearance_code\"]\n for k, v in self.appearance_code_dict.items():\n self.appearance_code_dict[k] = appearance_code_dict_1[\n k\n ] * interp + appearance_code_dict_2[k] * (1 - interp)\n to_gpu(self.appearance_code_dict)\n if self.optimize_exposure:\n autoexposure_param_1 = all_param_dict_1[\"exposure\"]\n autoexposure_param_2 = all_param_dict_2[\"exposure\"]\n for k, v in self.autoexposure_param.items():\n self.autoexposure_param[k] = autoexposure_param_1[\n k\n ] * interp + autoexposure_param_2[k] * (1 - interp)\n to_gpu(self.autoexposure_param)\n\n def reset_active_instance_id(self, active_instance_id, filter_door_and_window=True):\n self.active_instance_id = active_instance_id\n if filter_door_and_window:\n self.filter_door_and_window()\n\n def set_output_path(self, output_path: str, prefix: str, with_timestamp=True):\n if output_path is not None:\n if with_timestamp:\n self.output_path = os.path.join(\n output_path, f\"rendered_{get_timestamp()}_{prefix}\"\n )\n else:\n self.output_path = os.path.join(output_path, f\"{prefix}\")\n os.makedirs(self.output_path, exist_ok=True)\n\n def filter_door_and_window(self):\n print(\"Filtering door and window objects.\")\n filtered_active_instance_id = []\n for obj_id in self.active_instance_id:\n if self.get_type_of_instance(obj_id) not in [\"door\", \"window\"]:\n filtered_active_instance_id += [obj_id]\n self.active_instance_id = filtered_active_instance_id\n\n def initialize_light_code(self):\n self.light_code_dict = {}\n for obj_id in self.active_instance_id:\n # light_code = torch.randn((16)).cuda()\n light_code = torch.zeros((16)).cuda()\n light_code.requires_grad = True\n self.params += [\n {\"params\": light_code, \"lr\": self.lr}\n ] # light code can be optimized with larger lr\n self.light_code_dict[str(obj_id)] = light_code\n\n def initialize_appearance_code(self):\n self.appearance_code_dict = {}\n for obj_id in self.active_instance_id:\n # appearance_code = torch.randn((16)).cuda()\n appearance_code = torch.zeros((16)).cuda()\n appearance_code.requires_grad = True\n self.params += [\n {\"params\": appearance_code, \"lr\": self.lr}\n ] # light code can be optimized with larger lr\n self.appearance_code_dict[str(obj_id)] = appearance_code\n\n def initialize_autoexposure(self):\n self.autoexposure_param = {}\n for obj_id in self.active_instance_id:\n # scale and shift\n autoexposure_param = torch.Tensor([1, 1, 1, 0, 0, 0]).cuda()\n autoexposure_param.requires_grad = True\n self.params += [\n {\"params\": autoexposure_param, \"lr\": self.lr * 0.1}\n ] # light code can be optimized with larger lr\n self.autoexposure_param[str(obj_id)] = autoexposure_param\n\n def get_scale_factor(self, obj_id):\n if obj_id == 0:\n return self.bg_scale_factor\n elif str(obj_id) in self.scale_factor_dict:\n return self.scale_factor_dict[str(obj_id)]\n else:\n return self.scale_factor\n\n def extract_bounding_boxes_from_neural_model(self):\n print(\"Extracting object bounding boxes from neural model...\")\n assert self.model_type == \"NeuS\"\n for obj_id in tqdm(self.active_instance_id):\n mesh = extract_mesh_from_neus(\n 
self.models[f\"neus_{obj_id}\"],\n self.image_attrs[str(obj_id)],\n obj_id,\n )\n bbox = mesh.get_axis_aligned_bounding_box()\n bound = np.array([bbox.min_bound, bbox.max_bound])\n size = (bound[1] - bound[0]) * self.get_scale_factor(obj_id)\n # update scene_meta\n for idx, obj_info in enumerate(self.scene_meta[\"objs\"]):\n if obj_info[\"id\"] == obj_id:\n self.scene_meta[\"objs\"][idx][\"bdb3d\"][\"size\"] = size.tolist()\n\n def prepare_bbox_ray_helper(self):\n # bbox ray helper dict\n self.bbox_ray_helper_dict = {}\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n obj_meta_info = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n length = np.array(obj_meta_info[\"bbox3d\"][\"size\"])\n self.bbox_ray_helper_dict[str(obj_id)] = BBoxRayHelper(np.zeros(3), length)\n\n def generate_object_rays(\n self, rays_o_obj, rays_d_obj, obj_id, near=None, far=None, select_ind=None\n ):\n \"\"\"\n Generate object rays given rays_o, rays_d and obj_id\n Input:\n select_ind: only for masked rendering\n \"\"\"\n if obj_id == 0: # background\n return self.generate_bg_rays(rays_o_obj, rays_d_obj, near=near, far=far)\n if self.bbox_ray_intersect:\n # for object, rays_o and rays_d should lie in world scale (unscaled)\n bbox_mask, bbox_batch_near, bbox_batch_far = self.bbox_ray_helper_dict[\n str(obj_id)\n ].get_ray_bbox_intersections(\n rays_o_obj,\n rays_d_obj,\n self.get_scale_factor(obj_id),\n # bbox_enlarge=self.bbox_enlarge / self.get_scale_factor(obj_id),\n bbox_enlarge=self.bbox_enlarge, # in physical world\n )\n # for area which hits bbox, we use bbox hit near far\n # bbox_ray_helper has scale for us, do no need to rescale\n batch_near_obj, batch_far_obj = bbox_batch_near, bbox_batch_far\n rays_o_obj = rays_o_obj / self.get_scale_factor(obj_id)\n # for the invalid part, we use 0 as near far, which assume that (0, 0, 0) is empty\n batch_near_obj[~bbox_mask] = torch.zeros_like(batch_near_obj[~bbox_mask])\n batch_far_obj[~bbox_mask] = torch.zeros_like(batch_far_obj[~bbox_mask])\n else:\n near = self.near if near is None else near\n far = self.far if far is None else far\n batch_near_obj = (\n near\n / self.get_scale_factor(obj_id)\n * torch.ones_like(rays_o_obj[:, :1])\n )\n batch_far_obj = (\n far / self.get_scale_factor(obj_id) * torch.ones_like(rays_d_obj[:, :1])\n )\n rays_o_obj = rays_o_obj / self.get_scale_factor(obj_id)\n\n if self.mask_per_object:\n # mask out of bound rendering\n obj_mask = torch.from_numpy(self.instance_mask == obj_id).view(-1)\n obj_mask = obj_mask[select_ind]\n batch_near_obj[~obj_mask] = 0\n batch_far_obj[~obj_mask] = 0\n\n rays_obj = torch.cat(\n [rays_o_obj, rays_d_obj, batch_near_obj, batch_far_obj], 1\n ) # (H*W, 8)\n rays_obj = rays_obj.cuda()\n return rays_obj\n\n def generate_bg_rays(self, rays_o_bg, rays_d_bg, near=None, far=None):\n near = self.near if near is None else near\n far = self.far if far is None else far\n batch_near_bg = near / self.bg_scale_factor * torch.ones_like(rays_o_bg[:, :1])\n batch_far_bg = far / self.bg_scale_factor * torch.ones_like(rays_d_bg[:, :1])\n rays_o_bg = rays_o_bg / self.bg_scale_factor\n rays_bg = torch.cat(\n [rays_o_bg, rays_d_bg, batch_near_bg, batch_far_bg], 1\n ) # (H*W, 8)\n rays_bg = rays_bg.cuda()\n return rays_bg\n\n def batched_inference_multi(\n self,\n rays_list,\n obj_id_list,\n to_cpu=True,\n hit_test_only=False,\n need_normal=False,\n use_sphere_tracing=True,\n safe_region_volume_rendering=True,\n refine_edge=False,\n refine_edge_obj_ids=[],\n render_mask=False,\n # 
use_sphere_tracing=False,\n show_progress=False,\n **kwargs,\n ):\n \"\"\"Do batched inference on rays using chunk.\"\"\"\n B = rays_list[0].shape[0]\n results = defaultdict(list)\n for i in tqdm(range(0, B, self.chunk), disable=not show_progress):\n extra_chunk = dict()\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor) and \"autoexposure_\" not in k:\n extra_chunk[k] = v[i : i + self.chunk]\n else:\n extra_chunk[k] = v\n if self.model_type == \"NeRF\":\n rendered_ray_chunks = render_rays_multi(\n self.models,\n self.embeddings,\n [r[i : i + self.chunk] for r in rays_list],\n obj_id_list,\n self.N_samples,\n use_disp=False,\n perturb=0.001,\n # perturb=0.00,\n noise_std=0,\n N_importance=self.N_importance,\n chunk=self.chunk,\n white_back=True,\n individual_weight_for_coarse=True,\n obj_bg_relative_scale=self.bg_scale_factor / self.scale_factor,\n **extra_chunk,\n )\n elif self.model_type == \"NeuS\":\n rendered_ray_chunks = render_rays_multi_neus(\n self,\n self.models,\n [r[i : i + self.chunk] for r in rays_list],\n obj_id_list,\n noise_std=0,\n white_back=True,\n # white_back=False,\n # obj_bg_relative_scale=self.bg_scale_factor / self.scale_factor,\n hit_test_only=hit_test_only,\n need_normal=need_normal,\n use_sphere_tracing=use_sphere_tracing,\n safe_region_volume_rendering=safe_region_volume_rendering,\n refine_edge=refine_edge,\n refine_edge_obj_ids=refine_edge_obj_ids,\n render_mask=render_mask,\n extra_dict=extra_chunk,\n render_kwargs=self.render_kwargs_neus,\n )\n\n for k, v in rendered_ray_chunks.items():\n if to_cpu:\n results[k] += [v.cpu()]\n else:\n results[k] += [v]\n\n for k, v in results.items():\n results[k] = torch.cat(v, 0)\n return results\n\n def render_full_scene(\n self,\n pose: np.ndarray,\n idx: int,\n h: int,\n w: int,\n write_idx_on_image=True,\n return_raw_image=False,\n render_mask=False,\n refine_edge=False,\n use_sphere_tracing=True,\n safe_region_volume_rendering=False,\n show_progress=False,\n refine_edge_obj_ids=[],\n fovx_deg=0,\n ):\n extra_dict = dict()\n extra_dict[\"compute_3d_mask\"] = False\n extra_dict[\"is_eval\"] = True\n\n rays_list = []\n object_id_list = []\n\n if fovx_deg > 0:\n focal = (w / 2) / np.tan((fovx_deg / 2) / (180 / np.pi))\n print(\"focal =\", focal)\n directions = get_ray_directions(h, w, focal).cuda() # (h, w, 3)\n else:\n directions = get_ray_directions_equirectangular(h, w).cuda() # (h, w, 3)\n\n for obj_id in self.active_instance_id:\n # get object location\n # Two: object to world pose\n if obj_id == 0: # 0 denotes background\n Two = np.eye(4)\n Two[:3, 3] = self.bg_scene_center\n else: # other objects\n Two = torch.eye(4).cuda()\n Two[:3, :3] = rotation_6d_to_matrix(\n self.object_pose_dict[str(obj_id)][\"rot6d\"]\n )\n Two[:3, 3] = self.object_pose_dict[str(obj_id)][\"trans\"]\n Two = Two.detach().cpu().numpy()\n # pose: Twc\n # we need: Toc\n Twc = np.eye(4)\n Twc[:3, :4] = pose[:3, :4]\n\n Toc = np.linalg.inv(Two) @ Twc\n\n Toc = torch.from_numpy(Toc).float().cuda()[:3, :4]\n rays_o, rays_d = get_rays(directions, Toc)\n\n rays = self.generate_object_rays(rays_o, rays_d, obj_id)\n\n rays_list += [rays]\n object_id_list += [obj_id]\n\n # set image_attr for object code\n extra_dict[\"embedding_inst_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_instance(torch.ones_like(rays_o[..., 0]).long().cuda() * obj_id)\n # light code\n if self.optimize_light_env:\n if self.use_light_from_image_attr or obj_id in self.virtual_instance_id:\n if not hasattr(self, \"hard_code_light_id\"):\n 
self.hard_coded_light_id = 0\n extra_dict[\"embedding_light_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_light(\n torch.ones_like(rays_o[..., 0]).long().cuda()\n * self.hard_coded_light_id\n )\n else:\n extra_dict[\"embedding_light_{}\".format(obj_id)] = (\n self.light_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # appearance code\n if self.optimize_appearance_code and obj_id not in self.virtual_instance_id:\n if self.use_appearance_from_image_attr:\n extra_dict[\n \"embedding_appearance_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_appearance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * 0\n )\n else:\n extra_dict[\"embedding_appearance_{}\".format(obj_id)] = (\n self.appearance_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n\n # optimize exposure\n if self.optimize_exposure and obj_id not in self.virtual_instance_id:\n extra_dict[f\"autoexposure_{obj_id}\"] = self.autoexposure_param[\n str(obj_id)\n ]\n\n with torch.cuda.amp.autocast(enabled=True):\n with torch.no_grad():\n results = self.batched_inference_multi(\n rays_list,\n object_id_list,\n to_cpu=False,\n use_sphere_tracing=use_sphere_tracing,\n # use_sphere_tracing=True,\n safe_region_volume_rendering=safe_region_volume_rendering,\n refine_edge=refine_edge,\n render_mask=render_mask,\n show_progress=show_progress,\n **extra_dict,\n )\n img = results[f\"rgb_fine\"]\n img_pred = np.clip(img.view(h, w, 3).cpu().numpy(), 0, 1)\n img_pred_ = (img_pred * 255).astype(np.uint8)\n\n if return_raw_image:\n if render_mask:\n img_mask = results[f\"rendered_instance_mask\"]\n img_mask = (\n img_mask.view(h, w, 3)[:, :, 0]\n .cpu()\n .numpy()\n .round()\n .astype(np.uint16)\n )\n return img_pred_, img_mask\n return img_pred_ # raw image in [h, w, 3] np.uint8\n\n if write_idx_on_image:\n img_pred_ = cv2.putText(\n img_pred_,\n \"Iter: {:03d}\".format(idx),\n (20, 20),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.7,\n (255, 0, 0),\n 2,\n )\n\n imageio.imwrite(\n os.path.join(self.output_path, f\"{idx:06d}.multi_obj.png\"), img_pred_\n )\n if render_mask:\n img_mask = results[f\"rendered_instance_mask\"]\n img_mask = (\n img_mask.view(h, w, 3)[:, :, 0].cpu().numpy().round().astype(np.uint16)\n )\n cv2.imwrite(os.path.join(self.output_path, f\"{idx:06d}.seg.png\"), img_mask)\n\n def set_initial_object_poses_from_scene_meta(self, add_noise=True):\n self.object_pose_dict = {}\n\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n obj_meta_info = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n if \"gt_T_wo\" in obj_meta_info:\n Two = obj_meta_info[\"gt_T_wo\"]\n else:\n print(\n f\"Cannot find object pose for obj_id = {obj_id}, use custom pose with minor offset.\"\n )\n Two = np.eye(4)\n from scipy.spatial.transform import Rotation as R\n\n rot_fix = np.array([1, 0, 0, 0, 0, 1, 0, -1, 0]).reshape(3, 3)\n # TODO: update initial pose for real-world scenes\n # if obj_id == 31:\n # blender_xyz = np.array([-1.44, 1.18, 0.1])\n # blender_rot = R.from_quat([0.5, -0.5, 0.5, 0.5]).as_matrix()\n # elif obj_id == 32:\n # blender_xyz = np.array([0.76, 0.54, 0.98])\n # blender_rot = R.from_quat([0.707107, 0, 0, 0.707107]).as_matrix()\n # elif obj_id == 33:\n # blender_xyz = np.array([-0.06, 1.01, -0.9])\n # blender_rot = R.from_quat([0, 0.707107, -0.707107, 0]).as_matrix()\n # elif obj_id == 34:\n # blender_xyz = np.array([-0.05, 1.14, -0.15])\n # blender_rot = R.from_quat([0, 0.707107, -0.707107, 0]).as_matrix()\n # elif 
obj_id == 35:\n # blender_xyz = np.array([-0.35, 1.1, 0.98])\n # blender_rot = R.from_quat([0.707107, 0, 0, 0.707107]).as_matrix()\n\n # Two[:3, :3] = blender_rot @ rot_fix\n # Two[:3, :3] = rot_fix @ blender_rot\n # Two[:3, 3] = rot_fix @ blender_xyz\n\n # Two[1, 3] += 0.75\n # Two[2, 3] -= 0.7\n\n # add noise\n if add_noise:\n Two[:3, 3] += 0.1\n from scipy.spatial.transform import Rotation as R\n\n rot_noise = R.from_euler(\"z\", 20, degrees=True).as_matrix()\n Two[:3, :3] = Two[:3, :3] @ rot_noise\n Two = torch.from_numpy(Two).float().cuda()\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n if \"fix_object_pose\" not in self.optimize_option:\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_initial_pose_from_prediction(self, pred_json_path):\n print(\"Initial pose from\", pred_json_path)\n self.object_pose_dict = {}\n self.initial_pose_prediction = {}\n pred_info = read_json(pred_json_path)\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n Two = np.array(pred_info[str(obj_id)][\"Two\"])\n Two = torch.from_numpy(Two).float().cuda()\n self.initial_pose_prediction[str(obj_id)] = {\"Two\": Two.clone()}\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n\n if not \"fix_object_pose\" in self.optimize_option:\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_initial_pose_as_identity(self):\n print(\"Initial pose as identity.\")\n self.object_pose_dict = {}\n self.initial_pose_prediction = {}\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n Two = np.eye(4)\n Two = torch.from_numpy(Two).float().cuda()\n self.initial_pose_prediction[str(obj_id)] = {\"Two\": Two.clone()}\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_sampling_mask_from_seg(\n self,\n seg_mask=None,\n seg_mask_path=None,\n add_noise_to_seg=0,\n convert_seg_mask_to_box_mask=False,\n ):\n if seg_mask_path is not None:\n print(\"Read segmentation from gt mask\")\n # read mask\n self.instance_mask = get_instance_mask(seg_mask_path, img_wh=self.img_wh)\n elif seg_mask is not None:\n self.instance_mask = seg_mask\n else:\n print(\"Warning: empty mask\")\n self.merged_mask = (\n np.ones((self.img_wh[1], self.img_wh[0])).reshape(-1).astype(bool)\n )\n return\n\n # merge active object masks\n merged_mask = np.zeros_like(self.instance_mask)\n for i_obj, obj_id in enumerate(self.active_instance_id):\n if obj_id == 0:\n continue # do not accumulate background obj_id\n instance_mask_obj = self.instance_mask == obj_id\n # use tightly fit bbox instead of segmentation mask\n if convert_seg_mask_to_box_mask:\n instance_mask_obj = seg_mask_to_box_mask(instance_mask_obj)\n merged_mask = np.logical_or(merged_mask, instance_mask_obj)\n\n # if add noise to gt segmentation\n if add_noise_to_seg 
!= 0:\n is_dilate = add_noise_to_seg > 0\n add_noise_to_seg = abs(add_noise_to_seg)\n kernel = np.ones((add_noise_to_seg, add_noise_to_seg), np.uint8)\n if is_dilate:\n merged_mask = cv2.dilate(\n merged_mask.astype(np.uint8), kernel, iterations=1\n ).astype(bool)\n else:\n merged_mask = cv2.erode(\n merged_mask.astype(np.uint8), kernel, iterations=1\n ).astype(bool)\n cv2.imwrite(\n f\"{self.output_path}/merged_mask.png\", merged_mask.astype(np.uint8) * 255\n )\n self.merged_mask = merged_mask.reshape(-1)\n\n def get_type_of_instance(self, instance_id):\n for obj_info in self.scene_meta[\"objs\"]:\n if obj_info[\"id\"] == instance_id:\n return obj_info[\"classname\"]\n return \"unknown\"\n\n def generate_relation(\n self,\n obj_to_room_distance_th: float = 0.5,\n top_down_dist_th: float = 0.3,\n top_down_xy_close_factor: float = 0.8,\n ):\n \"\"\"\n Generate relationship : object-wall, object-floor, object-object\n \"\"\"\n print(\"Start to generate relation from initial poses and neural models...\")\n all_obj_info = {}\n for i, obj_id in enumerate(self.active_instance_id):\n if obj_id == 0:\n continue\n Rwo = rotation_6d_to_matrix(self.object_pose_dict[str(obj_id)][\"rot6d\"])\n two = self.object_pose_dict[str(obj_id)][\"trans\"]\n optimized_meta = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n optimized_meta.pop(\"gt_T_wo\", None) # pop gt\n # pass optimized object pose\n optimized_meta[\"Rwo\"] = Rwo\n optimized_meta[\"two\"] = two\n optimized_meta[\"obj_id\"] = obj_id\n all_obj_info[str(obj_id)] = optimized_meta\n with torch.no_grad():\n generate_relation_for_all(\n room_optimizer=self,\n all_obj_info=all_obj_info,\n obj_to_room_distance_th=obj_to_room_distance_th,\n top_down_dist_th=top_down_dist_th,\n top_down_xy_close_factor=top_down_xy_close_factor,\n )\n # print(\"Relation:\\n\", self.relation_info)\n for k, v in self.relation_info.items():\n print(k, v)\n\n def optimize(self, input_rgb: torch.Tensor, pose=None):\n \"\"\"\n Inputs:\n input_rgb: torch.Tensor [h, w, 3] normalized in 0...1\n \"\"\"\n if pose is None:\n pose = np.array(self.scene_meta[\"camera\"][\"cam3d2world\"]).reshape(4, 4)\n # Original poses has rotation in form \"right down forward\", change to NDC \"right up back\"\n fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)\n pose[:3, :3] = pose[:3, :3] @ fix_rot\n\n # camera to world pose\n Twc = np.eye(4)\n Twc[:3, :4] = pose[:3, :4]\n Twc = torch.from_numpy(Twc).float().cuda()\n\n if \"keypoint_mask\" in self.optimize_option:\n # detect keypoint for interest region\n keypoint_mask = detect_keypoints(input_rgb.numpy(), circle_radius=5)\n self.merged_mask = np.logical_and(\n keypoint_mask, self.merged_mask.reshape(keypoint_mask.shape)\n )\n cv2.imwrite(\n f\"{self.output_path}/merged_mask_keypoint.png\",\n self.merged_mask.astype(np.uint8) * 255,\n )\n self.merged_mask = self.merged_mask.reshape(-1)\n\n input_rgb = input_rgb.view(-1, 3) # (H*W, 3) RGB\n\n directions = get_ray_directions_equirectangular(\n self.h, self.w\n ).cuda() # (h, w, 3)\n\n mse_loss = nn.MSELoss(reduction=\"none\")\n\n assert hasattr(\n self, \"params\"\n ), \"Please set initial pose params before optimization.\"\n optimizer = torch.optim.Adam(self.params)\n\n scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp)\n perceptual_net = perceptual_model.VGG16_for_Perceptual().cuda()\n\n sample_prob = pano_sample_probability(self.h, self.w).reshape(-1)\n\n t = trange(self.N_optim_step, desc=\"Opt.\", leave=True)\n for i_step in t:\n if 
\"regenerate_relation_during_test\" in self.optimize_option:\n if i_step != 0 and i_step % 50 == 0:\n self.generate_relation()\n if self.adjust_lr_per_step > 0:\n adjust_learning_rate(\n self.lr,\n optimizer,\n i_step,\n base=0.5,\n adjust_lr_every=self.adjust_lr_per_step,\n )\n extra_dict = dict()\n rays_list = []\n object_id_list = []\n # sample according to batch size limitation\n select_ind = np.arange(self.merged_mask.shape[0])[self.merged_mask]\n if (\n \"perceptual_loss\" not in self.optimize_option\n ): # we only sample some points in this case\n # sample according to pano distribution\n select_sample_prob = sample_prob[self.merged_mask]\n select_sample_prob /= select_sample_prob.sum()\n # assert select_ind.shape[0] > self.optim_batch_size\n sample_size = min(select_ind.shape[0], self.optim_batch_size)\n select_ind = np.random.choice(\n select_ind,\n size=sample_size,\n replace=False,\n p=select_sample_prob,\n )\n\n # add some sampling on the background for bg light code\n if self.optimize_light_env:\n bg_sample_ratio = 0.2\n bg_sample_prob = sample_prob[~self.merged_mask]\n bg_sample_prob /= bg_sample_prob.sum()\n bg_sample_ind = np.arange(self.merged_mask.shape[0])[~self.merged_mask]\n # assert bg_sample_ind.shape[0] > self.optim_batch_size\n bg_sample_size = min(\n bg_sample_ind.shape[0], int(bg_sample_ratio * self.optim_batch_size)\n )\n if bg_sample_size > 0:\n bg_sample_ind = np.random.choice(\n bg_sample_ind,\n size=bg_sample_size,\n replace=False,\n p=bg_sample_prob,\n )\n select_ind = np.concatenate([select_ind, bg_sample_ind], axis=-1)\n\n select_ind = np.unique(select_ind)\n if i_step == 0:\n print(\"Actual optimization rays\", select_ind.shape[0])\n select_input_rgb = input_rgb[select_ind].float().cuda()\n\n loss_dict = {}\n all_obj_info = {} # prepare for violation loss\n\n for i, obj_id in enumerate(self.active_instance_id):\n # object to world pose\n if obj_id == 0:\n Rwo = torch.eye(3).cuda()\n two = torch.from_numpy(self.bg_scene_center).float().cuda()\n else:\n Rwo = rotation_6d_to_matrix(\n self.object_pose_dict[str(obj_id)][\"rot6d\"]\n )\n two = self.object_pose_dict[str(obj_id)][\"trans\"]\n\n # camera to object pose\n Toc = torch.eye(4).cuda()\n Toc[:3, :3] = Rwo.T @ Twc[:3, :3]\n Toc[:3, 3] = Rwo.T @ (Twc[:3, 3] - two)\n\n # generate object rays\n rays_o, rays_d = get_rays(directions, Toc[:3, :4])\n\n rays_o = rays_o[select_ind]\n rays_d = rays_d[select_ind]\n\n rays = self.generate_object_rays(rays_o, rays_d, obj_id)\n rays_list += [rays]\n object_id_list += [obj_id]\n\n # set image_attr for object code\n extra_dict[\"embedding_inst_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_instance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * obj_id\n )\n # light code\n if self.optimize_light_env:\n if self.use_light_from_image_attr:\n extra_dict[\n \"embedding_light_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_light(\n torch.ones_like(rays_o[..., 0]).long().cuda()\n * self.hard_coded_light_id\n )\n else:\n extra_dict[\"embedding_light_{}\".format(obj_id)] = (\n self.light_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # appearance code\n if self.optimize_appearance_code:\n if self.use_appearance_from_image_attr:\n extra_dict[\n \"embedding_appearance_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_appearance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * 0\n )\n else:\n extra_dict[\"embedding_appearance_{}\".format(obj_id)] = (\n self.appearance_code_dict[str(obj_id)]\n 
.view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # autoexposure\n if self.optimize_exposure:\n extra_dict[f\"autoexposure_{obj_id}\"] = self.autoexposure_param[\n str(obj_id)\n ]\n\n # we do not need to add relation constraints to bg\n if obj_id == 0:\n continue\n\n # enforce optimising on yaw\n if \"z_axis_align_loss\" in self.optimize_option:\n loss_dict[\"z_axis_loss_{}\".format(obj_id)] = (\n z_axis_loss(Rwo, 1.0) * 1e2\n )\n\n optimized_meta = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n optimized_meta.pop(\"gt_T_wo\", None) # pop gt\n # pass optimized object pose\n optimized_meta[\"Rwo\"] = Rwo\n optimized_meta[\"two\"] = two\n optimized_meta[\"obj_id\"] = obj_id\n obj_id_key = str(obj_id)\n\n if obj_id_key not in self.relation_info:\n continue\n\n # get obj_relation from input\n obj_relation = self.relation_info[obj_id_key]\n # supplement obj_type\n obj_type = self.get_type_of_instance(obj_id)\n optimized_meta[\"obj_type\"] = obj_type\n\n all_obj_info[str(obj_id)] = optimized_meta\n\n with torch.cuda.amp.autocast(enabled=self.use_amp):\n \"\"\"attach wall loss\"\"\"\n if (\n \"object_room_wall_attach\" in self.optimize_option\n and obj_relation.get(\"attach_wall\", False)\n ):\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info\": optimized_meta,\n # \"face_direction\": torch.Tensor([0, 1, 0]),\n # \"face_direction\": obj_relation.get(\n # \"attach_wall_face_dir\", torch.Tensor([0, 1, 0])\n # ),\n \"face_direction\": obj_relation[\"attach_wall_face_dir\"],\n \"ray_grid_size\": 10,\n }\n # for door object, we slightly stretch the size to ensure successive hit-test\n if obj_type == \"door\" or obj_type == \"window\":\n kwargs.update(\n {\n \"ray_grid_stretch\": torch.Tensor([1.2, 1.2, 1]),\n \"use_bbox_surface_as_in_detect\": True,\n }\n )\n loss_dict.update(object_room_magnetic_loss(**kwargs))\n\n \"\"\"attach floor loss\"\"\"\n if (\n \"object_room_floor_attach\" in self.optimize_option\n and obj_relation.get(\"attach_floor\", False)\n ):\n # # TODO(ybbbbt): hard code floor\n # loss_dict.update(\n # obj_attach_floor_loss(optimized_meta, floor=0.0)\n # )\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info\": optimized_meta,\n \"face_direction\": torch.Tensor([0, 0, -1]),\n \"ray_grid_stretch\": torch.Tensor(\n [0.8, 0.8, 1.0]\n ), # avoid too close to wall\n \"use_bbox_surface_as_in_detect\": True,\n \"ray_grid_size\": 3,\n }\n if obj_type == \"door\":\n # kwargs[\"ray_grid_offset\"] = torch.Tensor(\n # [0, -0.3, 0]\n # ) # to avoid to close to wall\n assert (\n \"attach_wall_face_dir\" in obj_relation\n ), f\"door {obj_id} relation prediction failed.\"\n kwargs[\"ray_grid_offset\"] = (\n obj_relation[\"attach_wall_face_dir\"] * -0.3\n ) # to avoid to close to wall\n loss_dict.update(object_room_magnetic_loss(**kwargs))\n\n with torch.cuda.amp.autocast(enabled=self.use_amp):\n results = self.batched_inference_multi(\n rays_list,\n object_id_list,\n to_cpu=False,\n # use_sphere_tracing=True,\n use_sphere_tracing=False,\n **extra_dict,\n )\n pred_rgb = results[\"rgb_fine\"]\n\n if \"photometric_loss\" in self.optimize_option:\n loss_dict[\"mse_loss\"] = mse_loss(pred_rgb, select_input_rgb).mean()\n\n if \"visualize_pred\" in self.optimize_option: # dump image for debug\n # pred_rgb_full = input_rgb.cuda()\n pred_rgb_full = torch.zeros_like(input_rgb.cuda())\n pred_rgb_full[select_ind] = pred_rgb\n\n imageio.imwrite(\n f\"debug/pred_rgb_full.png\",\n (pred_rgb_full * 255)\n .view(self.img_wh[1], self.img_wh[0], 3)\n .detach()\n .cpu()\n .numpy()\n 
.astype(np.uint8),\n )\n\n if \"perceptual_loss\" in self.optimize_option:\n pred_rgb_full = input_rgb.cuda()\n pred_rgb_full[select_ind] = pred_rgb\n loss_dict.update(\n patch_perceptual_loss(\n perceptual_net,\n pred_rgb_full,\n input_rgb,\n all_obj_info,\n self.instance_mask,\n self.img_wh,\n )\n )\n\n \"\"\"attach bottom to other object loss\"\"\"\n if \"object_object_attach\" in self.optimize_option:\n for obj_id_str, obj_relation in self.relation_info.items():\n if obj_relation.get(\"attach_bottom_to_object\", False):\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info_src\": all_obj_info[obj_id_str],\n \"obj_info_tgt\": all_obj_info[\n str(obj_relation[\"attach_tgt_obj_id\"])\n ],\n \"face_direction\": torch.Tensor([0, 0, -1]),\n }\n loss_dict.update(object_object_attach_loss(**kwargs))\n\n # physical violation loss\n if \"physical_violation\" in self.optimize_option:\n if (\n not \"physical_violation_delayed_start\" in self.optimize_option\n or i_step >= 100\n ):\n loss_dict.update(\n physical_violation_loss(\n self,\n all_obj_info,\n N_nearest_obj=3,\n check_background_violation=True,\n # N_sample_points=1000,\n N_sample_points=2000,\n # N_sample_points=300,\n )\n )\n\n if \"viewing_constraint\" in self.optimize_option:\n loss_dict.update(viewing_constraint_loss(self, Twc, all_obj_info))\n\n if \"print_loss_dict\" in self.optimize_option:\n for k, v in loss_dict.items():\n # if \"_62\" not in k:\n # continue\n print(k, \"=\", float(v))\n loss = sum(list(loss_dict.values()))\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n optimizer.zero_grad()\n\n t.set_description(\"Loss: %f\" % float(loss))\n t.refresh()\n # dump image\n if i_step % 20 == 0:\n self.save_optimizable_parameters(\n f\"{self.output_path}/{i_step:06d}.state.ckpt\"\n )\n # self.load_optimizable_parameters(\n # f\"{self.output_path}/{i_step:06d}.state.ckpt\"\n # )\n if i_step >= self.N_optim_step - 20:\n self.render_full_scene(\n pose=pose,\n idx=i_step,\n write_idx_on_image=False,\n render_mask=True,\n h=512,\n w=1280,\n )\n else:\n self.render_full_scene(\n pose=pose,\n idx=i_step,\n render_mask=False,\n h=self.h,\n w=self.w,\n )\n dump_optimization_meta_to_file(\n filepath=f\"{self.output_path}/{i_step:06d}.optim.json\",\n obj_pose_dict=self.object_pose_dict,\n )" }, { "identifier": "read_real_scene_localization", "path": "optim/misc_utils.py", "snippet": "def read_real_scene_localization(pose_path: str, transform_info_json_path: str):\n pose_dict = {}\n transform_info = read_json(transform_info_json_path)\n trans_colmap_to_arkit = np.array(transform_info[\"transform_colmap_to_arkit_sRT\"])\n trans_align = np.array(transform_info[\"transform_alignment\"])\n with open(pose_path) as file:\n lines = file.readlines()\n lines = lines[1:]\n for line in lines:\n fname, tx, ty, tz, qx, qy, qz, qw, _, _ = line.strip().split(\" \")\n fname += \".png\"\n pose = np.eye(4)\n pose[0, 3] = tx\n pose[1, 3] = ty\n pose[2, 3] = tz\n # Twc\n pose[:3, :3] = Rotation.from_quat([qx, qy, qz, qw]).as_matrix()\n # pose = np.linalg.inv(pose)\n # pose_ndc = np.linalg.inv(pose_ndc)\n\n # convert to ndc\n # pose_ndc = pose\n # fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)\n # pose_ndc[:3, :3] = pose_ndc[:3, :3] @ fix_rot\n\n # transform to arkit pose\n s, R, t = decompose_to_sRT(trans_colmap_to_arkit)\n # pose_ndc = transform_colmap_to_arkit @ pose_ndc\n # print(s, R, t)\n pose[:3, 3] = R @ (pose[:3, 3] * s) + t\n pose[:3, :3] = R @ pose[:3, :3]\n\n # apply alignment to poses\n pose = trans_align 
@ pose\n\n pose_dict[fname] = {\"pose_slam_Twc\": pose}\n # print(fname, pose)\n return pose_dict" }, { "identifier": "read_testing_config", "path": "optim/misc_utils.py", "snippet": "def read_testing_config():\n conf_cli = OmegaConf.from_cli()\n conf_test_file = OmegaConf.load(conf_cli.config)\n # read dataset config\n conf_test_file[\"dataset_config\"] = read_dataset_config_file(\n conf_test_file[\"dataset_config_path\"]\n )\n conf_test_file[\"bg_dataset_config\"] = read_dataset_config_file(\n conf_test_file[\"bg_dataset_config_path\"]\n )\n\n # processing ckpt\n ckpt_path_dict = {}\n for item in conf_test_file[\"ckpt_lists\"]:\n path = item[\"path\"]\n obj_ids = item[\"obj_ids\"]\n neus_conf = item.get(\"neus_conf\", \"config/neus.yaml\")\n for obj_id in obj_ids:\n ckpt_path_dict[str(obj_id)] = {\"path\": path, \"neus_conf\": neus_conf}\n conf_test_file[\"ckpt_path_dict\"] = ckpt_path_dict\n\n conf_merged = OmegaConf.merge(conf_test_file, conf_cli)\n return conf_merged" } ]
import sys
import os
import torch
import numpy as np
import imageio
import time
import cv2
from tqdm import tqdm
from argparse import ArgumentParser
from utils.util import read_json, read_yaml
from optim.room_optimizer import RoomOptimizer
from optim.misc_utils import read_real_scene_localization, read_testing_config
from scipy.spatial.transform import Rotation
14,139
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def render_frame(config, target_dir): # or load from config active_instance_id = config.active_instance_id dataset_config = config.dataset_config["dataset"] scene_info_json_path = config.scene_info_json active_instance_id = [0]
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def render_frame(config, target_dir): # or load from config active_instance_id = config.active_instance_id dataset_config = config.dataset_config["dataset"] scene_info_json_path = config.scene_info_json active_instance_id = [0]
for obj_info in read_json(scene_info_json_path)["objs"]:
0
2023-10-15 08:41:29+00:00
16k
WenzhengZhang/Seq2seqCoref
trainer.py
[ { "identifier": "CorefAllMetrics", "path": "metrics.py", "snippet": "class CorefAllMetrics(object):\n \"\"\"\n Wrapper for coreference resolution metrics.\n \"\"\"\n\n @staticmethod\n def _get_mention_to_x(clusters: List[list]) -> dict:\n mention_to_x = {}\n for cluster in clusters:\n for m in cluster:\n mention_to_x[m] = tuple(cluster)\n return mention_to_x\n\n def _compute_mention_detect_metrics(self, gold_clusters: List[list],\n predicted_clusters: List[list]):\n # mention detection evaluation\n mention_evaluator = MentionEvaluator()\n results = {}\n predicted_mentions = list(self._get_mention_to_x(\n predicted_clusters).keys())\n gold_mentions = list(self._get_mention_to_x(gold_clusters).keys())\n mention_evaluator.update(predicted_mentions, gold_mentions)\n mention_precision, mention_recall, mention_f1 = \\\n mention_evaluator.get_prf()\n results['precision'] = mention_precision\n results['recall'] = mention_recall\n results['f1'] = mention_f1\n return results\n\n def _compute_coref_metrics(self, gold_clusters: List[list],\n predicted_clusters: List[list]) \\\n -> Dict[str, Dict[str, float]]:\n \"\"\"\n Compute all coreference metrics given a list of gold cluster and a list of predicted clusters.\n \"\"\"\n mention_to_predicted = self._get_mention_to_x(predicted_clusters)\n mention_to_gold = self._get_mention_to_x(gold_clusters)\n result = {}\n metric_name_evals = [('muc', Evaluator(muc)),\n ('b_cubed', Evaluator(b_cubed)),\n ('ceaf', Evaluator(ceafe))]\n\n for name, evaluator in metric_name_evals:\n evaluator.update(predicted_clusters, gold_clusters,\n mention_to_predicted, mention_to_gold)\n result[name] = {\n 'precision': evaluator.get_precision(),\n 'recall': evaluator.get_recall(),\n 'f1': evaluator.get_f1()\n }\n\n result['average'] = {\n 'precision': sum(\n [result[k]['precision'] for k, _ in metric_name_evals]) / len(\n metric_name_evals),\n 'recall': sum(\n [result[k]['recall'] for k, _ in metric_name_evals]) / len(\n metric_name_evals),\n 'f1': sum([result[k]['f1'] for k, _ in metric_name_evals]) / len(\n metric_name_evals)\n }\n\n return result\n\n @staticmethod\n def _average_nested_dict(\n list_nested_dict: List[Dict[str, Dict[str, float]]]) -> Dict[\n str, Dict[str, float]]:\n \"\"\"\n Given a list of 2-level nested dict, compute the average.\n \"\"\"\n result_dict = {}\n\n # sum up all values\n for outer_dict in list_nested_dict:\n for key_outer, value_outer in outer_dict.items():\n if key_outer not in result_dict:\n result_dict[key_outer] = {}\n for key_inner, value_inner in value_outer.items():\n result_dict[key_outer][key_inner] = result_dict[\n key_outer].get(\n key_inner, 0.0) + value_inner\n\n # take the average\n for key_outer, value_outer in result_dict.items():\n for key_inner, value_inner in value_outer.items():\n result_dict[key_outer][key_inner] = result_dict[key_outer][\n key_inner] / len(\n list_nested_dict)\n\n return result_dict\n\n def get_all_metrics(self, labels: List[List[List[Tuple[int, int]]]],\n preds: List[List[List[Tuple[int, int]]]]) \\\n -> Dict[str, Dict[str, Dict[str, float]]]:\n \"\"\"\n Compute all metrics for coreference resolution.\n In input are given two list of mention groups, for example:\n [ # this is the corpus level, with a list of documents\n [ # this is the document level, with a list of mention clusters\n [ # this is the cluster level, with a list of spans\n (5, 7),\n (11, 19),\n ...\n ],\n ...\n ]\n ]\n \"\"\"\n assert len(labels) == len(preds)\n result = {}\n\n # compute micro-averaged scores (treat all clusters from all 
docs as a single list of clusters)\n gold_clusters = [\n [(i,) + span for span in cluster] for i, clusters in\n enumerate(labels) for cluster in clusters\n ]\n predicted_clusters = [\n [(i,) + span for span in cluster] for i, clusters in\n enumerate(preds) for cluster in clusters\n ]\n coref_ment_results = self._compute_coref_metrics(gold_clusters,\n predicted_clusters)\n ment_results = self._compute_mention_detect_metrics(gold_clusters,\n predicted_clusters)\n coref_ment_results['mention_detect'] = ment_results\n result['micro'] = coref_ment_results\n\n # compute macro-averaged scores (compute p/r/f1 for each doc first, then take average per doc)\n doc_metrics = []\n for gold_clusters, predicted_clusters in zip(labels, preds):\n doc_metrics.append(self._compute_coref_metrics(\n gold_clusters, predicted_clusters\n ))\n result['macro'] = self._average_nested_dict(doc_metrics)\n\n return result" }, { "identifier": "get_document_predicts", "path": "data.py", "snippet": "def get_document_predicts(doc_preds: List[List]) -> List[\n List[Tuple[int, int]]]:\n \"\"\"\n Aggregate predictions for each chunk into document-level predictions.\n \"\"\"\n if len(doc_preds) == 0:\n return []\n graph = nx.compose_all([nx.complete_graph(p) for p in doc_preds])\n\n processed_groups = []\n for component in nx.connected_components(graph):\n processed_group = []\n for start, end in sorted(component, key=lambda x: (x[0], -x[1])):\n # add this entity if it does not overlap with the previous one\n condition = not any(\n [s < start < e < end for (s, e) in processed_group])\n # if len(processed_group) == 0 or start >= processed_group[-1][1]:\n # processed_group.append((start, end))\n if len(processed_group) == 0 or condition:\n processed_group.append((start, end))\n\n processed_groups.append(processed_group)\n\n return [[(start, end) for start, end in group] for group in\n processed_groups]" }, { "identifier": "parse_int_output_tokens", "path": "data.py", "snippet": "def parse_int_output_tokens(input_ids, output_ids,\n special_ids, subtoken_map, tokenizer,\n thred, is_tagging):\n rec_ids, new_id = [], -1\n ment_start_stack = []\n unmatched_clusters = defaultdict(list)\n new_output_ids = []\n if is_tagging:\n new_input_ids = [special_ids['copy'] for t in input_ids if\n t != tokenizer.pad_token_id and t != special_ids[\n 'eos']]\n new_input_ids.append(special_ids['eos'])\n else:\n new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]\n token_mentions = []\n for i in range(len(output_ids)):\n if output_ids[i] == tokenizer.pad_token_id:\n break\n if output_ids[i] == special_ids['mention_start']:\n new_id += 1\n ment_start_stack.append([new_id, 'name', []])\n if is_tagging:\n new_output_ids.append(output_ids[i])\n elif output_ids[i] == special_ids['mention_end']:\n new_id += 0\n if is_tagging:\n new_output_ids.append(output_ids[i])\n if len(ment_start_stack) > 0:\n item = ment_start_stack.pop()\n if item[1] == \"ent\":\n unmatched_clusters[tuple(item[-1])].append(\n (item[0], new_id))\n else:\n # a normal token\n # if output_ids[i] == special_ids['sep']:\n # status = \"ent\"\n if len(ment_start_stack) > 0:\n # inside some entities\n if output_ids[i] == special_ids['sep']:\n ment_start_stack[-1][1] = \"ent\"\n if is_tagging:\n new_output_ids.append(output_ids[i])\n else:\n if ment_start_stack[-1][1] == 'ent':\n ment_start_stack[-1][2].append(output_ids[i])\n if is_tagging:\n new_output_ids.append(output_ids[i])\n elif ment_start_stack[-1][1] == 'name':\n new_id += 1\n rec_ids.append(output_ids[i])\n if 
is_tagging:\n new_output_ids.append(input_ids[new_id])\n else:\n raise ValueError('wrong status')\n else:\n # outside\n new_id += 1\n rec_ids.append(output_ids[i])\n if is_tagging:\n new_output_ids.append(input_ids[new_id])\n if output_ids[i] == special_ids['mention_start']:\n new_id -= 1\n # thred = 1 if allow_singletons else 2\n # Needleman-Wunsch text alignment algorithm\n wrong_reconstruction = (rec_ids != new_input_ids)\n if wrong_reconstruction:\n print(f'new input ids {new_input_ids}')\n print(f'reconstructed ids {rec_ids}')\n print(f'out ids {output_ids}')\n print('wrong reconstruction! please debug')\n matching = global_align(new_input_ids, rec_ids)\n\n # update predicted entities with the positions in the original sentence\n clusters = defaultdict(list)\n\n for ent_id, ments in unmatched_clusters.items():\n for start, end in ments:\n new_start = None # start in the original sequence\n new_end = None # end in the original sequence\n\n for j in range(start, end + 1):\n if j in matching:\n if new_start is None:\n new_start = matching[j]\n\n new_end = matching[j]\n\n if new_start is not None:\n # predict entity\n clusters[ent_id].append((\n subtoken_map[new_start], subtoken_map[new_end]))\n token_mentions.append((new_start, new_end))\n predict_clusters = [list(set(v)) for k, v in clusters.items() if\n len(set(v)) >= thred]\n token_mentions = list(set(token_mentions))\n else:\n clusters = [[(subtoken_map[m[0]], subtoken_map[m[1]]) for m in v] for v\n in\n unmatched_clusters.values()]\n predict_clusters = [list(set(v)) for v in clusters if len(set(v)) >=\n thred]\n token_mentions = [(m[0], m[1]) for v in unmatched_clusters.values()\n for m in v]\n token_mentions = list(set(token_mentions))\n if not is_tagging:\n new_output_ids = output_ids\n return predict_clusters, token_mentions, new_output_ids" }, { "identifier": "parse_short_target_tokens", "path": "data.py", "snippet": "def parse_short_target_tokens(input_ids, output_ids,\n special_ids, subtoken_map, tokenizer,\n align_mode, thred, split_sentence):\n # support mark sentence, align sentence by sentence\n rec_ids, new_id = [], -1\n ment_start_stack = []\n unmatched_clusters = defaultdict(list)\n new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]\n for i in range(len(output_ids)):\n if output_ids[i] == tokenizer.pad_token_id:\n break\n if output_ids[i] == special_ids['mention_start']:\n ment_start_stack.append([new_id + 1, 'name', []])\n elif output_ids[i] == special_ids['mention_end']:\n if len(ment_start_stack) > 0:\n item = ment_start_stack.pop()\n if item[1] == \"ent\":\n unmatched_clusters[tuple(item[-1])].append(\n (item[0], new_id))\n else:\n # a normal token\n if len(ment_start_stack) > 0:\n # inside some entities\n if output_ids[i] == special_ids['sep']:\n ment_start_stack[-1][1] = \"ent\"\n else:\n if ment_start_stack[-1][1] == 'ent':\n ment_start_stack[-1][2].append(output_ids[i])\n elif ment_start_stack[-1][1] == 'name':\n new_id += 1\n rec_ids.append(output_ids[i])\n else:\n raise ValueError('wrong status')\n\n else:\n # outside\n new_id += 1\n rec_ids.append(output_ids[i])\n # mapping.append(new_id)\n # thred = 1 if allow_singletons else 2\n # Affine global text alignment algorithm\n if split_sentence:\n input_sents = split_list(\n new_input_ids, special_ids['sentence_start'], True)\n out_sents = split_list(rec_ids, special_ids['sentence_start'], True)\n try:\n assert len(input_sents) == len(out_sents)\n aligned_input_ids, aligned_rec_ids, matching = [], [], {}\n input_offset, out_offset = 0, 0\n for 
input_sent, out_sent in zip(input_sents, out_sents):\n aligned_input_sent, aligned_out_sent, sent_match = \\\n affine_global_align(input_sent, out_sent,\n special_ids['copy'],\n align_mode)\n aligned_input_ids.extend(aligned_input_sent)\n aligned_rec_ids.extend(aligned_out_sent)\n matching.update(\n {k + out_offset: v + input_offset for k, v in\n sent_match.items()})\n input_offset += len(input_sent)\n out_offset += len(out_sent)\n except AssertionError:\n print(f'input sents and out sents different length '\n f'{len(input_sents)} vs {len(out_sents)}, have to use '\n f'global alignment')\n aligned_input_ids, aligned_rec_ids, matching = affine_global_align(\n new_input_ids, rec_ids, special_ids['copy'], align_mode)\n else:\n aligned_input_ids, aligned_rec_ids, matching = affine_global_align(\n new_input_ids, rec_ids, special_ids['copy'], align_mode)\n # update predicted entities with the positions in the original sentence\n clusters = defaultdict(list)\n\n for ent_id, ments in unmatched_clusters.items():\n for start, end in ments:\n new_start = None # start in the original sequence\n new_end = None # end in the original sequence\n\n for j in range(start, end + 1):\n if j in matching:\n if new_start is None:\n new_start = matching[j]\n\n new_end = matching[j]\n\n if new_start is not None:\n # predict entity\n clusters[ent_id].append((\n subtoken_map[new_start], subtoken_map[new_end]))\n predict_clusters = [list(set(v)) for k, v in clusters.items() if\n len(set(v)) >= thred]\n return predict_clusters, aligned_input_ids, aligned_rec_ids" }, { "identifier": "parse_nonint_output_tokens", "path": "data.py", "snippet": "def parse_nonint_output_tokens(input_ids, output_ids,\n special_ids, subtoken_map,\n tokenizer,\n add_mention_end,\n thred):\n rec_ids, new_id = [], -1\n ment_start_stack = []\n unmatched_clusters = defaultdict(list)\n new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]\n token_mentions = []\n for i in range(len(output_ids)):\n if output_ids[i] == tokenizer.pad_token_id:\n break\n if output_ids[i] == special_ids['mention_start']:\n new_id += 1\n ment_start_stack.append(new_id)\n elif add_mention_end and output_ids[i] == special_ids['mention_end']:\n assert output_ids[i + 1] in special_ids['cluster_ids_to_num']\n cid = special_ids['cluster_ids_to_num'][output_ids[i + 1]]\n if len(ment_start_stack) > 0:\n item = ment_start_stack.pop()\n unmatched_clusters[cid].append((item, new_id))\n elif output_ids[i] in special_ids['cluster_ids_to_num']:\n if not add_mention_end:\n cid = special_ids['cluster_ids_to_num'][output_ids[i]]\n if len(ment_start_stack) > 0:\n item = ment_start_stack.pop()\n unmatched_clusters[cid].append((item, new_id))\n else:\n new_id += 1\n rec_ids.append(output_ids[i])\n if output_ids[i] == special_ids['mention_start']:\n new_id -= 1\n # Needleman-Wunsch text alignment algorithm\n wrong_reconstruction = (rec_ids != new_input_ids)\n # thred = 1 if allow_singletons else 2\n if wrong_reconstruction:\n print(f'new input ids {new_input_ids}')\n print(f'reconstructed ids {rec_ids}')\n print(f'out ids {output_ids}')\n print('wrong reconstruction! 
please debug')\n matching = global_align(new_input_ids, rec_ids)\n\n # update predicted entities with the positions in the original sentence\n clusters = defaultdict(list)\n\n for ent_id, ments in unmatched_clusters.items():\n for start, end in ments:\n new_start = None # start in the original sequence\n new_end = None # end in the original sequence\n\n for j in range(start, end + 1):\n if j in matching:\n if new_start is None:\n new_start = matching[j]\n\n new_end = matching[j]\n\n if new_start is not None:\n # predict entity\n clusters[ent_id].append((\n subtoken_map[new_start], subtoken_map[new_end]))\n token_mentions.append((new_start, new_end))\n predict_clusters = [list(set(v)) for k, v in clusters.items() if\n len(set(v)) >= thred]\n token_mentions = list(set(token_mentions))\n else:\n clusters = [[(subtoken_map[m[0]], subtoken_map[m[1]]) for m in v] for v\n in\n unmatched_clusters.values()]\n predict_clusters = [list(set(v)) for v in clusters if len(set(v)) >=\n thred]\n token_mentions = [(m[0], m[1]) for v in unmatched_clusters.values()\n for m in v]\n token_mentions = list(set(token_mentions))\n return predict_clusters, token_mentions, output_ids" }, { "identifier": "SPECIAL_IDS", "path": "constants.py", "snippet": "SPECIAL_IDS = {\n 'speaker_start': int_tokenizer.encode(SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end': int_tokenizer.encode(SPEAKER_END, add_special_tokens=False)[\n 0],\n 'mention_start': int_tokenizer.encode(MENTION_START,\n add_special_tokens=False)[0],\n 'mention_end': int_tokenizer.encode(MENTION_END, add_special_tokens=False)[\n 0],\n 'sep': int_tokenizer.encode(SEP_TOKEN, add_special_tokens=False)[0],\n 'copy': int_tokenizer.encode(COPY, add_special_tokens=False)[0],\n 'eos': int_tokenizer.eos_token_id\n}" }, { "identifier": "MARK_SPECIAL_IDS", "path": "constants.py", "snippet": "MARK_SPECIAL_IDS = deepcopy(SPECIAL_IDS)" }, { "identifier": "NON_INT_SPECIAL_IDS", "path": "constants.py", "snippet": "NON_INT_SPECIAL_IDS = {\n 'speaker_start': non_int_tokenizer.encode(\n SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end':\n non_int_tokenizer.encode(\n SPEAKER_END, add_special_tokens=False)[0],\n 'mention_start': non_int_tokenizer.encode(\n MENTION_START,\n add_special_tokens=False)[0],\n 'cluster_ids': MENTION_ENDS_IDS,\n 'cluster_ids_to_num': END_IDS_TO_NUM,\n 'cluster_new': non_int_tokenizer.encode(\n CLUSTER_NEW,\n add_special_tokens=False)[0],\n 'copy': non_int_tokenizer.encode(\n COPY, add_special_tokens=False)[0],\n 'eos': non_int_tokenizer.eos_token_id\n}" }, { "identifier": "MENTION_END_NON_INT_SPECIAL_IDS", "path": "constants.py", "snippet": "MENTION_END_NON_INT_SPECIAL_IDS = {\n 'speaker_start': mention_end_non_int_tokenizer.encode(\n SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end':\n mention_end_non_int_tokenizer.encode(\n SPEAKER_END, add_special_tokens=False)[0],\n 'mention_start': mention_end_non_int_tokenizer.encode(\n MENTION_START,\n add_special_tokens=False)[0],\n 'mention_end': mention_end_non_int_tokenizer.encode(\n MENTION_END,\n add_special_tokens=False)[0],\n 'cluster_ids': CLUSTER_IDS,\n 'cluster_ids_to_num': CLUSTER_IDS_TO_NUM,\n 'cluster_new': mention_end_non_int_tokenizer.encode(\n CLUSTER_NEW,\n add_special_tokens=False)[0],\n 'copy': mention_end_non_int_tokenizer.encode(\n COPY, add_special_tokens=False)[0],\n 'eos': mention_end_non_int_tokenizer.eos_token_id\n}" }, { "identifier": "ShortSeqProcessor", "path": "logits_processor.py", "snippet": "class ShortSeqProcessor(LogitsProcessor):\n\n def 
__init__(self, orig_inputs, special_ids):\n self.orig_inputs = orig_inputs\n self.sentence_start = special_ids['sentence_start']\n self.sentence_end = special_ids['sentence_end']\n self.mention_start = special_ids['mention_start']\n self.mention_end = special_ids['mention_end']\n self.sep = special_ids['sep']\n self.ent_ids = special_ids['integers'] + [special_ids['mention_end']]\n self.eos_id = special_ids['eos']\n self.sentence_mask = self.get_sentence_mask(orig_inputs)\n\n def get_sentence_mask(self, orig_inputs: torch.Tensor):\n # index from 1 instead of 0\n return (orig_inputs == self.sentence_start).cumsum(-1)\n\n def __call__(self, input_ids: torch.LongTensor,\n scores: torch.FloatTensor) -> torch.FloatTensor:\n is_sent_start = (input_ids == self.sentence_start)\n is_sent_end = (input_ids == self.sentence_end)\n sent_idx = is_sent_start.sum(-1, keepdim=True)\n unclose_sent = (sent_idx.sum(-1) - is_sent_end.sum(-1)) > 0\n close_sent = (~unclose_sent)\n is_sep = (input_ids == self.sep)\n is_end = (input_ids == self.mention_end)\n is_start = (input_ids == self.mention_start)\n is_ent = (is_sep.cumsum(-1) - is_end.cumsum(-1)).bool()\n unclose_ent = (is_ent[:, -1] & unclose_sent)\n unclose_ment = (is_start.sum(-1) - is_sep.sum(-1)) > 0\n close_ent = (~unclose_ent)\n unclose_ment = (close_ent & unclose_ment & unclose_sent)\n masks = torch.ones_like(scores, dtype=torch.bool)\n masks[unclose_sent, self.sentence_end] = False\n masks[close_sent, self.sentence_start] = False\n assert scores.size(0) % self.orig_inputs.size(0) == 0\n num_beams = scores.size(0) // self.orig_inputs.size(0)\n # repeat over beams\n orig_ids = self.orig_inputs.repeat_interleave(num_beams, 0)\n sent_mask = self.sentence_mask.repeat_interleave(num_beams, 0)\n cur_sent_mask = (sent_mask != sent_idx)\n sent_ids = orig_ids.masked_fill(cur_sent_mask, self.sentence_end)\n masks[unclose_sent] = masks[unclose_sent].scatter(1, sent_ids[\n unclose_sent], False)\n masks[unclose_sent, self.sentence_start] = True\n masks[unclose_ent, torch.tensor(self.ent_ids).unsqueeze(1)] = False\n masks[close_ent, self.mention_start] = False\n masks[unclose_ment, self.sep] = False\n is_eos = (close_sent & (sent_idx.sum(-1) == sent_mask[:, -1]))\n masks[is_eos] = True\n masks[is_eos, self.eos_id] = False\n scores.masked_fill_(masks, -float('inf'))\n return scores" }, { "identifier": "IntProcessor", "path": "logits_processor.py", "snippet": "class IntProcessor(LogitsProcessor):\n\n def __init__(self, orig_inputs, special_ids, seq2seq_type):\n \"\"\"\n\n :param orig_inputs: original input_ids\n :param special_ids: dict with keys:[mention_start, mention_end, sep,\n integers]\n \"\"\"\n self.orig_inputs = orig_inputs\n self.seq2seq_type = seq2seq_type\n self.special_ids = special_ids\n self.mention_start = special_ids['mention_start']\n self.mention_end = special_ids['mention_end']\n self.sep = special_ids['sep']\n self.ent_ids = special_ids['integers'] + [special_ids['mention_end']]\n self.specials = [self.mention_start, self.sep] + self.ent_ids\n if self.seq2seq_type == 'action' or self.seq2seq_type == 'tagging' or \\\n self.seq2seq_type == 'input_feed':\n self.copy_id = special_ids['copy']\n self.specials.append(self.copy_id)\n self.eos_id = special_ids['eos']\n\n def __call__(self, input_ids: torch.LongTensor,\n scores: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n\n :param input_ids: BC x l\n :param scores: BC x V\n :return:\n \"\"\"\n # input_ids : B x L\n is_sep = (input_ids == self.sep)\n is_end = (input_ids == self.mention_end)\n 
is_start = (input_ids == self.mention_start)\n is_ent = (is_sep.cumsum(-1) - is_end.cumsum(-1)).bool()\n is_copy = ((~is_start) & (~is_ent) & (~is_end))\n unclose_ent = is_ent[:, -1]\n unclose_ment = (is_start.sum(-1) - is_sep.sum(-1)) > 0\n unclose_ment = ((~unclose_ent) & unclose_ment)\n # -1 for <pad> at begining\n num_copied = is_copy.sum(-1) - 1\n masks = torch.ones_like(scores, dtype=torch.bool)\n close_ent = (~unclose_ent)\n num_copied = num_copied.clamp(max=self.orig_inputs.size(1) - 1)\n # unclosed ent only allows to generate cluster ids or end mention id\n masks[unclose_ent, torch.tensor(self.ent_ids).unsqueeze(1)] = False\n masks[close_ent, self.mention_start] = False\n masks[unclose_ment, self.sep] = False\n # get next copy id\n assert scores.size(0) % self.orig_inputs.size(0) == 0\n num_beams = scores.size(0) // self.orig_inputs.size(0)\n # repeat over beams\n orig_ids = self.orig_inputs.repeat_interleave(num_beams, 0)\n next_ids = orig_ids[torch.arange(scores.size(0)), num_copied]\n if self.seq2seq_type == 'tagging':\n masks[close_ent, self.copy_id] = False\n else:\n if self.seq2seq_type == 'action' or self.seq2seq_type == \\\n 'input_feed':\n scores[close_ent, next_ids[close_ent]] = scores[close_ent,\n self.copy_id]\n masks[close_ent, next_ids[close_ent]] = False\n is_eos = (close_ent & (next_ids == self.eos_id))\n masks[is_eos, torch.tensor(self.specials).unsqueeze(1)] = True\n masks[is_eos, self.eos_id] = False\n scores.masked_fill_(masks, -float('inf'))\n return scores" }, { "identifier": "NonIntProcessor", "path": "logits_processor.py", "snippet": "class NonIntProcessor(LogitsProcessor):\n\n def __init__(self, orig_inputs, special_ids,\n seq2seq_type,\n add_mention_end):\n \"\"\"\n\n :param orig_inputs: original input_ids\n :param special_ids: dict with keys:[mention_start, mention_end, sep,\n integers]\n :param add_mention_end: whether predict mention end before predict\n cluster ids\n \"\"\"\n self.orig_inputs = orig_inputs\n self.special_ids = special_ids\n self.seq2seq_type = seq2seq_type\n self.mention_start = special_ids['mention_start']\n if add_mention_end:\n self.mention_end = special_ids['mention_end']\n else:\n self.mention_end = None\n self.cluster_ids = torch.tensor(special_ids['cluster_ids'],\n dtype=torch.long)\n self.cluster_new = special_ids['cluster_new']\n self.copy_id = special_ids['copy']\n self.eos_id = special_ids['eos']\n self.first_cluster_id = special_ids['cluster_ids'][0]\n self.last_cluster_id = special_ids['cluster_ids'][-1]\n self.add_mention_end = add_mention_end\n\n def __call__(self, input_ids: torch.LongTensor,\n scores: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n\n :param input_ids: BC x l\n :param scores: BC x V\n :return:\n \"\"\"\n # input_ids : B x L\n cluster_ids = self.cluster_ids.to(input_ids.device)\n range_indices = torch.arange(scores.size(0))\n is_not_cid = torch.isin(input_ids, cluster_ids, invert=True)\n is_not_start = (input_ids != self.mention_start)\n if self.add_mention_end:\n is_not_end = (input_ids != self.mention_end)\n unclosed_ent = (input_ids[:, -1] == self.mention_end)\n close_ent = (~unclosed_ent)\n is_copy = (is_not_start & is_not_end & is_not_cid)\n else:\n is_not_end = is_not_cid\n is_copy = (is_not_start & is_not_end)\n unclosed_ment = (is_not_start.sum(-1) - is_not_end.sum(-1)) < 0\n if self.add_mention_end:\n unclosed_ment = (close_ent & unclosed_ment)\n # -1 for <pad> at begining\n num_copied = is_copy.sum(-1) - 1\n masks = torch.ones_like(scores, dtype=torch.bool)\n num_copied = 
num_copied.clamp(max=self.orig_inputs.size(1) - 1)\n # unclosed ent only allows to generate cluster ids or end mention id\n # masks[:, self.specials] = False\n if self.add_mention_end:\n masks[close_ent, self.mention_start] = False\n masks[unclosed_ment, self.mention_end] = False\n else:\n masks[:, self.mention_start] = False\n # notice: make sure </mk> and </mk+1> are next to each other in vocab\n cluster_input_ids = input_ids.masked_fill(\n is_not_cid,\n self.first_cluster_id - 1)\n next_cids = cluster_input_ids.amax(-1) + 1\n if self.add_mention_end:\n has_prev_ends = (unclosed_ent & (next_cids > self.first_cluster_id))\n masks[unclosed_ent, next_cids[unclosed_ent]] = False\n else:\n has_prev_ends = (unclosed_ment & (next_cids >\n self.first_cluster_id))\n masks[unclosed_ment, next_cids[unclosed_ment]] = False\n\n masks[has_prev_ends] = masks[has_prev_ends].scatter(\n 1, cluster_input_ids[has_prev_ends], False)\n masks[has_prev_ends, self.first_cluster_id - 1] = True\n # get next copy id\n assert scores.size(0) % self.orig_inputs.size(0) == 0\n num_beams = scores.size(0) // self.orig_inputs.size(0)\n # repeat over beams\n orig_ids = self.orig_inputs.repeat_interleave(num_beams, 0)\n next_ids = orig_ids[range_indices, num_copied]\n if self.add_mention_end:\n if self.seq2seq_type == 'action' or self.seq2seq_type == \\\n 'input_feed':\n scores[close_ent, next_ids[close_ent]] = scores[close_ent,\n self.copy_id]\n scores[unclosed_ent, next_cids[unclosed_ent]] = scores[\n unclosed_ent, self.cluster_new]\n masks[close_ent, next_ids[close_ent]] = False\n else:\n if self.seq2seq_type == 'action' or self.seq2seq_type == \\\n 'input_feed':\n scores[range_indices, next_ids] = scores[:, self.copy_id]\n scores[unclosed_ment, next_cids[unclosed_ment]] = scores[\n unclosed_ment,\n self.cluster_new]\n masks[range_indices, next_ids] = False\n is_eos = (next_ids == self.eos_id)\n masks[is_eos] = True\n masks[is_eos, self.eos_id] = False\n scores.masked_fill_(masks, -float('inf'))\n return scores" } ]
import time import torch.distributed as dist import sys import numpy as np import os import json import re import torch.nn as nn import torch import shutil import math import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met import torch_xla.distributed.parallel_loader as pl import smdistributed.modelparallel.torch as smp import safetensors.torch from tqdm.auto import tqdm from transformers.trainer_utils import HPSearchBackend, speed_metrics, \ TrainOutput from pathlib import Path from torch.utils.data import RandomSampler from torch.utils.data.distributed import DistributedSampler from transformers.trainer_callback import TrainerState from transformers.trainer import TRAINER_STATE_NAME, OptimizerNames from transformers.utils import is_apex_available from transformers.integrations import hp_params from transformers import Seq2SeqTrainer from packaging import version from collections import defaultdict from metrics import CorefAllMetrics from typing import Dict, Union, Any, Optional, Tuple, List from transformers.debug_utils import DebugOption, DebugUnderflowOverflow from transformers.pytorch_utils import is_torch_less_than_1_11 from torch.utils.data import DataLoader from transformers.trainer_utils import EvalLoopOutput, has_length, \ denumpify_detensorize, ShardedDDPOption from data import get_document_predicts, parse_int_output_tokens, \ parse_short_target_tokens, parse_nonint_output_tokens from constants import SPECIAL_IDS, MARK_SPECIAL_IDS, NON_INT_SPECIAL_IDS, \ MENTION_END_NON_INT_SPECIAL_IDS from transformers.deepspeed import deepspeed_init from transformers.trainer_pt_utils import find_batch_size, nested_concat, \ nested_numpify, IterableDatasetShard, nested_truncate, get_parameter_names from transformers.modeling_utils import PreTrainedModel, unwrap_model, \ load_sharded_checkpoint from transformers.utils import logging, is_torch_tpu_available, \ is_sagemaker_mp_enabled, is_safetensors_available, SAFE_WEIGHTS_NAME, \ WEIGHTS_NAME, WEIGHTS_INDEX_NAME from transformers.integrations import is_fairscale_available from transformers.dependency_versions_check import dep_version_check from smdistributed.modelparallel import __version__ as SMP_VERSION from apex import amp from transformers import LogitsProcessorList from logits_processor import ShortSeqProcessor, IntProcessor, NonIntProcessor from transformers.trainer_seq2seq import is_deepspeed_zero3_enabled
11163
# Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of # samplers has been rounded to a multiple of batch_size, so we truncate. if all_losses is not None: all_losses = all_losses[:num_samples] if all_preds is not None: all_preds = nested_truncate(all_preds, num_samples) if all_labels is not None: all_labels = nested_truncate(all_labels, num_samples) if all_inputs is not None: all_inputs = nested_truncate(all_inputs, num_samples) # Metrics! doc_labels = eval_dataset.doc_labels eval_samples = eval_dataset.samples split = eval_dataset.split if self.args.joint_train: doc_id_to_name = eval_dataset.id_to_name else: doc_id_to_name = None # allow_singletons = eval_dataset.data_args.allow_singletons assert all_preds is not None metrics = self.my_compute_metrics(doc_labels, all_preds, eval_samples, split, doc_id_to_name) # if all_preds is not None and doc_labels is not None: # metrics = self.get_eval_metrics(doc_labels, all_preds, # eval_samples, split) # else: # metrics = {} # To be JSON-serializable, we need to remove numpy types or zero-d tensors metrics = denumpify_detensorize(metrics) if all_losses is not None: metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item() # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) if self.args.gradient_checkpointing: self.model.config.use_cache = False return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples) def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: """ Perform an evaluation step on `model` using `inputs`. Subclass and override to inject custom behavior. Args: model (`nn.Module`): The model to evaluate. inputs (`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. prediction_loss_only (`bool`): Whether or not to return the loss only. ignore_keys: list of ignore keys Return: Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and labels (each being optional). 
""" if not self.args.predict_with_generate or prediction_loss_only: return super().prediction_step( model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys ) has_labels = "labels" in inputs inputs = self._prepare_inputs(inputs) # XXX: adapt synced_gpus for fairscale as well gen_kwargs = self._gen_kwargs.copy() gen_kwargs["max_length"] = ( gen_kwargs["max_length"] if gen_kwargs.get( "max_length") is not None else self.model.config.max_length ) gen_kwargs["num_beams"] = ( gen_kwargs["num_beams"] if gen_kwargs.get( "num_beams") is not None else self.model.config.num_beams ) default_synced_gpus = True if is_deepspeed_zero3_enabled() else False gen_kwargs["synced_gpus"] = ( gen_kwargs["synced_gpus"] if gen_kwargs.get( "synced_gpus") is not None else default_synced_gpus ) if "attention_mask" in inputs: gen_kwargs["attention_mask"] = inputs.get("attention_mask", None) if "global_attention_mask" in inputs: gen_kwargs["global_attention_mask"] = inputs.get( "global_attention_mask", None) # prepare generation inputs # some encoder-decoder models can have varying encoder's and thus # varying model input names if hasattr(self.model, "encoder") and self.model.encoder.main_input_name != self.model.main_input_name: generation_inputs = inputs[self.model.encoder.main_input_name] else: generation_inputs = inputs[self.model.main_input_name] # add our logits_processor here if self.args.seq2seq_type != 'short_seq': if self.args.action_type == 'non_integer': special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \ self.args.add_mention_end else NON_INT_SPECIAL_IDS gen_kwargs['logits_processor'] = LogitsProcessorList(
if is_torch_tpu_available(check_device=False): if is_fairscale_available(): dep_version_check("fairscale") if is_sagemaker_mp_enabled(): IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse( "1.10") else: IS_SAGEMAKER_MP_POST_1_10 = False if is_safetensors_available(): if is_apex_available(): logger = logging.get_logger(__name__) TRAINING_ARGS_NAME = "training_args.bin" TRAINER_STATE_NAME = "trainer_state.json" OPTIMIZER_NAME = "optimizer.pt" SCHEDULER_NAME = "scheduler.pt" SCALER_NAME = "scaler.pt" class CorefTrainer(Seq2SeqTrainer): def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir) if self.args.val_after_train and self.args.eval_delay < \ self.state.global_step: for checkpoint in checkpoints_sorted[:-1]: states_dir = [str(x) for x in Path( checkpoint).glob(f'global_step*') if os.path.isdir(x)] for state_dir in states_dir: logger.info(f"Deleting optimizer states of saved " f"checkpoint {checkpoint}") if os.path.exists(state_dir) and os.path.isdir( state_dir): shutil.rmtree(state_dir) else: if len(checkpoints_sorted) <= self.args.save_total_limit: return # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. save_total_limit = self.args.save_total_limit if ( self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1 and checkpoints_sorted[ -1] != self.state.best_model_checkpoint ): save_total_limit = 2 number_of_checkpoints_to_delete = max(0, len( checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[ :number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) def _save(self, output_dir: Optional[str] = None, state_dict=None): # If we are executing this function, we are the process zero, so we don't check for that. output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info(f"Saving model checkpoint to {output_dir}") # Save a trained model and configuration using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` if not isinstance(self.model, PreTrainedModel) and not hasattr( self.model, 'save_pretrained'): if state_dict is None: state_dict = self.model.state_dict() if isinstance(unwrap_model(self.model), PreTrainedModel): unwrap_model(self.model).save_pretrained( output_dir, state_dict=state_dict, # safe_serialization=self.args.save_safetensors ) else: logger.info( "Trainer.model is not a `PreTrainedModel`, only saving its state dict.") # if self.args.save_safetensors: # safetensors.torch.save_file(state_dict, # os.path.join(output_dir, # SAFE_WEIGHTS_NAME)) # else: torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: self.model.save_pretrained( output_dir, state_dict=state_dict, # safe_serialization=self.args.save_safetensors ) if self.tokenizer is not None: self.tokenizer.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) def _inner_training_loop( self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None ): self._train_batch_size = batch_size # Data loader and number of training steps train_dataloader = self.get_train_dataloader() # Setting up training control variables: # number of training epochs: num_train_epochs # number of training steps per epoch: num_update_steps_per_epoch # total number of training steps to execute: max_steps total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size len_dataloader = None if has_length(train_dataloader): len_dataloader = len(train_dataloader) num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) num_examples = self.num_examples(train_dataloader) if args.max_steps > 0: max_steps = args.max_steps num_train_epochs = args.max_steps // num_update_steps_per_epoch + int( args.max_steps % num_update_steps_per_epoch > 0 ) # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's # the best we can do. num_train_samples = args.max_steps * total_train_batch_size else: max_steps = math.ceil( args.num_train_epochs * num_update_steps_per_epoch) num_train_epochs = math.ceil(args.num_train_epochs) num_train_samples = self.num_examples( train_dataloader) * args.num_train_epochs elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size max_steps = args.max_steps # Setting a very large number of epochs so we go as many times as necessary over the iterator. num_train_epochs = sys.maxsize num_update_steps_per_epoch = max_steps num_examples = total_train_batch_size * args.max_steps num_train_samples = args.max_steps * total_train_batch_size else: raise ValueError( "args.max_steps must be set to a positive value if dataloader does not have a length, was" f" {args.max_steps}" ) if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug: if self.args.n_gpu > 1: # nn.DataParallel(model) replicates the model, creating new variables and module # references registered here no longer work on other gpus, breaking the module raise ValueError( "Currently --debug underflow_overflow is not supported under DP. Please use DDP" " (torch.distributed.launch)." 
) else: debug_overflow = DebugUnderflowOverflow(self.model) # noqa delay_optimizer_creation = ( self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE or is_sagemaker_mp_enabled() or self.fsdp is not None ) if args.deepspeed: deepspeed_engine, optimizer, lr_scheduler = deepspeed_init( self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint ) self.model = deepspeed_engine.module self.model_wrapped = deepspeed_engine self.deepspeed = deepspeed_engine self.optimizer = optimizer self.lr_scheduler = lr_scheduler elif not delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) self.state = TrainerState() self.state.is_hyper_param_search = trial is not None # Activate gradient checkpointing if needed if args.gradient_checkpointing: self.model.gradient_checkpointing_enable() model = self._wrap_model(self.model_wrapped) if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None: self._load_from_checkpoint(resume_from_checkpoint, model) # for the rest of this function `model` is the outside model, whether it was wrapped or not if model is not self.model: self.model_wrapped = model if delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) # Check if saved optimizer or scheduler states exist self._load_optimizer_and_scheduler(resume_from_checkpoint) # important: at this point: # self.model is the Transformers Model # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc. # Train! logger.info("***** Running training *****") logger.info(f" Num examples = {num_examples}") logger.info(f" Num Epochs = {num_train_epochs}") logger.info( f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info( f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}") logger.info( f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {max_steps}") logger.info( f" Number of trainable parameters = {sum(p.numel() for p in model.parameters() if p.requires_grad)}" ) self.state.epoch = 0 start_time = time.time() epochs_trained = 0 steps_trained_in_current_epoch = 0 steps_trained_progress_bar = None # Check if continuing training from a checkpoint if resume_from_checkpoint is not None and os.path.isfile( os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME) ): self.state = TrainerState.load_from_json( os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) epochs_trained = self.state.global_step // num_update_steps_per_epoch if not args.ignore_data_skip: steps_trained_in_current_epoch = self.state.global_step % ( num_update_steps_per_epoch) steps_trained_in_current_epoch *= args.gradient_accumulation_steps else: steps_trained_in_current_epoch = 0 logger.info( " Continuing training from checkpoint, will skip to saved global_step") logger.info(f" Continuing training from epoch {epochs_trained}") logger.info( f" Continuing training from global step {self.state.global_step}") if not args.ignore_data_skip: logger.info( f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} " "batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` " "flag to your launch command, but you will resume the training on data already seen by your model." 
) if self.is_local_process_zero() and not args.disable_tqdm: steps_trained_progress_bar = tqdm( total=steps_trained_in_current_epoch) steps_trained_progress_bar.set_description( "Skipping the first batches") # Update the references self.callback_handler.model = self.model self.callback_handler.optimizer = self.optimizer self.callback_handler.lr_scheduler = self.lr_scheduler self.callback_handler.train_dataloader = train_dataloader if self.hp_name is not None and self._trial is not None: # use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing trial # parameter to Train when using DDP. self.state.trial_name = self.hp_name(self._trial) if trial is not None: assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial self.state.trial_params = hp_params(assignments) else: self.state.trial_params = None # This should be the same if the state has been saved but in case the training arguments changed, it's safer # to set this after the load. self.state.max_steps = max_steps self.state.num_train_epochs = num_train_epochs self.state.is_local_process_zero = self.is_local_process_zero() self.state.is_world_process_zero = self.is_world_process_zero() # tr_loss is a tensor to avoid synchronization of TPUs through .item() tr_loss = torch.tensor(0.0).to(args.device) # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses self._total_loss_scalar = 0.0 self._globalstep_last_logged = self.state.global_step model.zero_grad() self.control = self.callback_handler.on_train_begin(args, self.state, self.control) # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point. if not args.ignore_data_skip: for epoch in range(epochs_trained): is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance( train_dataloader.sampler, RandomSampler ) if is_torch_less_than_1_11 or not is_random_sampler: # We just need to begin an iteration to create the randomization of the sampler. # That was before PyTorch 1.11 however... if self.args.joint_train: train_dataloader.dataset.set_samples(epoch) for _ in train_dataloader: break else: # Otherwise we need to call the whooooole sampler cause there is some random operation added # AT THE VERY END! _ = list(train_dataloader.sampler) if args.manual_empty_cache: torch.cuda.empty_cache() for epoch in range(epochs_trained, num_train_epochs): if self.args.joint_train: train_dataloader.dataset.set_samples(epoch) if isinstance(train_dataloader, DataLoader) and isinstance( train_dataloader.sampler, DistributedSampler): train_dataloader.sampler.set_epoch(epoch) elif hasattr(train_dataloader, "dataset") and isinstance( train_dataloader.dataset, IterableDatasetShard): train_dataloader.dataset.set_epoch(epoch) if is_torch_tpu_available(): parallel_loader = pl.ParallelLoader(train_dataloader, [ args.device]).per_device_loader(args.device) epoch_iterator = parallel_loader else: epoch_iterator = train_dataloader # Reset the past mems state at the beginning of each epoch if necessary. 
if args.past_index >= 0: self._past = None steps_in_epoch = ( len(epoch_iterator) if len_dataloader is not None else args.max_steps * args.gradient_accumulation_steps ) self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) step = -1 if args.manual_empty_cache: torch.cuda.empty_cache() for step, inputs in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if args.manual_empty_cache: torch.cuda.empty_cache() if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 if steps_trained_progress_bar is not None: steps_trained_progress_bar.update(1) if steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) continue elif steps_trained_progress_bar is not None: steps_trained_progress_bar.close() steps_trained_progress_bar = None if step % args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin(args, self.state, self.control) # if args.manual_empty_cache: # torch.cuda.empty_cache() if ( ((step + 1) % args.gradient_accumulation_steps != 0) and args.local_rank != -1 and args._no_sync_in_gradient_accumulation ): # Avoid unnecessary DDP synchronization since there will be no backward pass on this example. with model.no_sync(): tr_loss_step = self.training_step(model, inputs) else: tr_loss_step = self.training_step(model, inputs) if ( args.logging_nan_inf_filter and not is_torch_tpu_available() and ( torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)) ): # if loss is nan or inf simply add the average of previous logged losses tr_loss += tr_loss / ( 1 + self.state.global_step - self._globalstep_last_logged) else: tr_loss += tr_loss_step self.current_flos += float(self.floating_point_ops(inputs)) # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps if self.deepspeed: if args.manual_empty_cache: torch.cuda.empty_cache() self.deepspeed.step() if (step + 1) % args.gradient_accumulation_steps == 0 or ( # last step in epoch but step is always smaller than gradient_accumulation_steps steps_in_epoch <= args.gradient_accumulation_steps and (step + 1) == steps_in_epoch ): # Gradient clipping if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed: # deepspeed does its own clipping if self.do_grad_scaling: # Reduce gradients first for XLA if is_torch_tpu_available(): gradients = xm._fetch_gradients(self.optimizer) xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size()) # AMP: gradients need unscaling self.scaler.unscale_(self.optimizer) if is_sagemaker_mp_enabled() and args.fp16: self.optimizer.clip_master_grads(args.max_grad_norm) elif hasattr(self.optimizer, "clip_grad_norm"): # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping self.optimizer.clip_grad_norm(args.max_grad_norm) elif hasattr(model, "clip_grad_norm_"): # Some models (like FullyShardedDDP) have a specific way to do gradient clipping model.clip_grad_norm_(args.max_grad_norm) else: # Revert to normal clipping otherwise, handling Apex or full precision nn.utils.clip_grad_norm_( amp.master_params( self.optimizer) if self.use_apex else model.parameters(), args.max_grad_norm, ) # Optimizer step optimizer_was_run = True if self.deepspeed: pass # called outside the loop elif is_torch_tpu_available(): if self.do_grad_scaling: 
self.scaler.step(self.optimizer) self.scaler.update() else: xm.optimizer_step(self.optimizer) elif self.do_grad_scaling: scale_before = self.scaler.get_scale() self.scaler.step(self.optimizer) self.scaler.update() scale_after = self.scaler.get_scale() optimizer_was_run = scale_before <= scale_after else: self.optimizer.step() if optimizer_was_run and not self.deepspeed: self.lr_scheduler.step() model.zero_grad() self.state.global_step += 1 self.state.epoch = epoch + (step + 1) / steps_in_epoch if args.manual_empty_cache: torch.cuda.empty_cache() self.control = self.callback_handler.on_step_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) else: self.control = self.callback_handler.on_substep_end(args, self.state, self.control) if self.control.should_epoch_stop or self.control.should_training_stop: break if step < 0: logger.warning( "There seems to be not a single sample in your epoch_iterator, stopping training at step" f" {self.state.global_step}! This is expected if you're using an IterableDataset and set" f" num_steps ({max_steps}) higher than the number of available samples." ) self.control.should_training_stop = True self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: if is_torch_tpu_available(): # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) else: logger.warning( "You enabled PyTorch/XLA debug metrics but you don't have a TPU " "configured. Check your training configuration if this is unexpected." ) if self.control.should_training_stop: break if args.past_index and hasattr(self, "_past"): # Clean the state at the end of training delattr(self, "_past") logger.info( "\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") if args.load_best_model_at_end and self.state.best_model_checkpoint is not None: # Wait for everyone to get here so we are sur the model has been saved by process 0. if is_torch_tpu_available(): xm.rendezvous("load_best_model_at_end") elif args.local_rank != -1: dist.barrier() elif is_sagemaker_mp_enabled(): smp.barrier() self._load_best_model() # add remaining tr_loss self._total_loss_scalar += tr_loss.item() train_loss = self._total_loss_scalar / self.state.global_step metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps) self.store_flos() metrics["total_flos"] = self.state.total_flos metrics["train_loss"] = train_loss self.is_in_train = False self._memory_tracker.stop_and_update_metrics(metrics) self.log(metrics) run_dir = self._get_output_dir(trial) checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint. 
if self.state.best_model_checkpoint is not None and \ self.args.save_total_limit == 1 and self.is_world_process_zero(): for checkpoint in checkpoints_sorted: if checkpoint != self.state.best_model_checkpoint: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) self.control = self.callback_handler.on_train_end(args, self.state, self.control) return TrainOutput(self.state.global_step, train_loss, metrics) def my_compute_metrics(self, doc_labels: Dict[str, List[List]], predicts: Any, samples: List, split: str, id_to_name: Dict = None ) -> Dict: if self.args.joint_train: data_names = self.args.joint_data_names.split(',') joint_threds = [ int(t) for t in self.args.joint_min_num_mentions.split(',')] name_to_threds = {n: t for n, t in zip(data_names, joint_threds)} documents_to_chunk_data = defaultdict(list) documents_to_chunk_gold = defaultdict(list) predictions = {} golds = {} assert len(samples) == len(predicts) out_sents = [] last_doc_id = re.sub(r'_\d+$', '', samples[0]['doc_key']) for sample, predict in zip(samples, predicts): doc_key = sample['doc_key'] doc_id = re.sub(r'_\d+$', '', doc_key) # require convert to ids first input_ids = sample['sentence'] subtoken_map = sample['subtoken_map'] offset = sample['offset'] # remove bos predict_ids = predict[1:].tolist() gold_data = sample['seg_clusters'] if self.args.joint_train: thred = name_to_threds[id_to_name[doc_id]] else: thred = self.args.min_num_mentions if self.args.seq2seq_type == "short_seq": special_ids = MARK_SPECIAL_IDS if self.args.mark_sentence \ else SPECIAL_IDS pred_data, aligned_input_ids, aligned_pred_ids = \ parse_short_target_tokens(input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.align_mode, thred, self.args.mark_sentence ) pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = { 'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( pred_tokens), 'pred_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_pred_ids ), 'input_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_input_ids ) } else: is_tagging = (self.args.seq2seq_type == 'tagging') if self.args.action_type == 'integer': pred_data, pred_token_mentions, predict_ids = \ parse_int_output_tokens( input_ids, predict_ids, SPECIAL_IDS, subtoken_map, self.tokenizer, thred, is_tagging) else: special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \ self.args.add_mention_end else NON_INT_SPECIAL_IDS pred_data, pred_token_mentions, predict_ids = \ parse_nonint_output_tokens( input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.add_mention_end, thred) pred_token_mentions = [(m[0] + offset, m[1] + offset) for m in pred_token_mentions] pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = {'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( pred_tokens), 'predict_clusters': pred_data, 'gold_clusters': gold_data, 'predict_token_mentions': pred_token_mentions } # list of (m1,m2) documents_to_chunk_data[doc_id].extend(pred_data) documents_to_chunk_gold[doc_id].extend(gold_data) out_sents.append(out_predict) if doc_id != last_doc_id: predictions[last_doc_id] = get_document_predicts( documents_to_chunk_data[ last_doc_id]) golds[last_doc_id] = get_document_predicts( documents_to_chunk_gold[ last_doc_id]) last_doc_id = doc_id # final one predictions[last_doc_id] = get_document_predicts( 
documents_to_chunk_data[last_doc_id] ) golds[last_doc_id] = get_document_predicts( documents_to_chunk_gold[last_doc_id] ) # print(predictions) if self.args.joint_train: predictions_list = defaultdict(list) labels_list = defaultdict(list) golds_list = defaultdict(list) else: predictions_list = [] labels_list = [] golds_list = [] for document_id, doc_label in doc_labels.items(): if self.args.joint_train: predictions_list[id_to_name[document_id]].append( predictions[document_id]) labels_list[id_to_name[document_id]].append(doc_label) golds_list[id_to_name[document_id]].append(golds[document_id]) else: predictions_list.append(predictions[document_id]) labels_list.append(doc_label) golds_list.append(golds[document_id]) if self.args.joint_train: label_results = {} gold_results = {} for dn in predictions_list.keys(): metrics = CorefAllMetrics().get_all_metrics( labels_list[dn], predictions_list[dn]) metrics_golds = CorefAllMetrics().get_all_metrics( golds_list[dn], predictions_list[dn]) single_label_results = { f'{dn}_{metric_name}_{x}': v for metric_name, metric_values in metrics['micro'].items() for x, v in metric_values.items() } single_gold_results = { f'{dn}_gold_{metric_name}_{x}': v for metric_name, metric_values in metrics_golds['micro'].items() for x, v in metric_values.items() } label_results.update(single_label_results) gold_results.update(single_gold_results) else: metrics = CorefAllMetrics().get_all_metrics(labels_list, predictions_list) metrics_golds = CorefAllMetrics().get_all_metrics(golds_list, predictions_list) label_results = { f'{metric_name}_{x}': v for metric_name, metric_values in metrics['micro'].items() for x, v in metric_values.items() } gold_results = { f'gold_{metric_name}_{x}': v for metric_name, metric_values in metrics_golds['micro'].items() for x, v in metric_values.items() } results = {**label_results, **gold_results} if self.args.joint_train: avg_f1s = [results[f"{dname}_average_f1"] for dname in data_names] results["average_f1"] = sum(avg_f1s) / len(avg_f1s) if self.is_world_process_zero() and self.args.save_predicts: os.makedirs(self.args.save_dir, exist_ok=True) save_path = os.path.join(self.args.save_dir, f'{split}-predicts.txt') results_path = os.path.join(self.args.save_dir, f'{split}-results.json') with open(save_path, 'w') as f: for p in out_sents: f.write('%s\n' % json.dumps(p)) with open(results_path, 'w') as f: json.dump(results, f) return results def evaluation_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = False, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> EvalLoopOutput: """ Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. Works both with or without labels. 
""" args = self.args prediction_loss_only = False # if eval is called w/o train init deepspeed here if args.deepspeed and not self.deepspeed: # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval # from the checkpoint eventually deepspeed_engine, _, _ = deepspeed_init( self, num_training_steps=0, resume_from_checkpoint=None, inference=is_deepspeed_zero3_enabled() ) self.model = deepspeed_engine.module self.model_wrapped = deepspeed_engine self.deepspeed = deepspeed_engine if self.args.gradient_checkpointing: self.model.config.use_cache = True model = self._wrap_model(self.model, training=False, dataloader=dataloader) # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called # while ``train`` is running, cast it to the right dtype first and then put on device if not self.is_in_train: if args.fp16_full_eval: model = model.to(dtype=torch.float16, device=args.device) elif args.bf16_full_eval: model = model.to(dtype=torch.bfloat16, device=args.device) batch_size = self.args.eval_batch_size logger.info(f"***** Running {description} *****") if has_length(dataloader): logger.info(f" Num examples = {self.num_examples(dataloader)}") else: logger.info(" Num examples: Unknown") logger.info(f" Batch size = {batch_size}") model.eval() self.callback_handler.eval_dataloader = dataloader # Do this before wrapping. eval_dataset = getattr(dataloader, "dataset", None) if is_torch_tpu_available(): dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader( args.device) if args.past_index >= 0: self._past = None # Initialize containers # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps) losses_host = None preds_host = None labels_host = None inputs_host = None # losses/preds/labels on CPU (final containers) all_losses = None all_preds = None all_labels = None all_inputs = None # Will be useful when we have an iterable dataset so don't know its length. observed_num_examples = 0 # Main evaluation loop for step, inputs in enumerate(dataloader): # Update the observed num examples observed_batch_size = find_batch_size(inputs) if observed_batch_size is not None: observed_num_examples += observed_batch_size # For batch samplers, batch_size is not known by the dataloader in advance. 
if batch_size is None: batch_size = observed_batch_size # Prediction step loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) inputs_decode = self._prepare_input(inputs[ "input_ids"]) if args.include_inputs_for_metrics else None if is_torch_tpu_available(): xm.mark_step() # Update containers on host if loss is not None: losses = self._nested_gather(loss.repeat(batch_size)) losses_host = losses if losses_host is None else torch.cat( (losses_host, losses), dim=0) if labels is not None: labels = self._pad_across_processes(labels) labels = self._nested_gather(labels) labels_host = labels if labels_host is None else nested_concat( labels_host, labels, padding_index=-100) if inputs_decode is not None: inputs_decode = self._pad_across_processes(inputs_decode) inputs_decode = self._nested_gather(inputs_decode) inputs_host = ( inputs_decode if inputs_host is None else nested_concat(inputs_host, inputs_decode, padding_index=-100) ) if logits is not None: logits = self._pad_across_processes(logits) logits = self._nested_gather(logits) if self.preprocess_logits_for_metrics is not None: logits = self.preprocess_logits_for_metrics(logits, labels) preds_host = logits if preds_host is None else nested_concat( preds_host, logits, padding_index=-100) self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. if args.eval_accumulation_steps is not None and ( step + 1) % args.eval_accumulation_steps == 0: if losses_host is not None: losses = nested_numpify(losses_host) all_losses = losses if all_losses is None else np.concatenate( (all_losses, losses), axis=0) if preds_host is not None: logits = nested_numpify(preds_host) all_preds = logits if all_preds is None else nested_concat( all_preds, logits, padding_index=-100) if inputs_host is not None: inputs_decode = nested_numpify(inputs_host) all_inputs = ( inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100) ) if labels_host is not None: labels = nested_numpify(labels_host) all_labels = ( labels if all_labels is None else nested_concat( all_labels, labels, padding_index=-100) ) # Set back to None to begin a new accumulation losses_host, preds_host, inputs_host, labels_host = None, None, None, None if args.past_index and hasattr(self, "_past"): # Clean the state at the end of the evaluation loop delattr(self, "_past") # Gather all remaining tensors and put them back on the CPU if losses_host is not None: losses = nested_numpify(losses_host) all_losses = losses if all_losses is None else np.concatenate( (all_losses, losses), axis=0) if preds_host is not None: logits = nested_numpify(preds_host) all_preds = logits if all_preds is None else nested_concat( all_preds, logits, padding_index=-100) if inputs_host is not None: inputs_decode = nested_numpify(inputs_host) all_inputs = ( inputs_decode if all_inputs is None else nested_concat( all_inputs, inputs_decode, padding_index=-100) ) if labels_host is not None: labels = nested_numpify(labels_host) all_labels = labels if all_labels is None else nested_concat( all_labels, labels, padding_index=-100) # Number of samples if has_length(eval_dataset): num_samples = len(eval_dataset) # The instance check is weird and does not actually check for the type, but whether the dataset has the right # methods. Therefore we need to make sure it also has the attribute. 
elif isinstance(eval_dataset, IterableDatasetShard) and getattr( eval_dataset, "num_examples", 0) > 0: num_samples = eval_dataset.num_examples else: if has_length(dataloader): num_samples = self.num_examples(dataloader) else: # both len(dataloader.dataset) and len(dataloader) fail num_samples = observed_num_examples if num_samples == 0 and observed_num_examples > 0: num_samples = observed_num_examples # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of # samplers has been rounded to a multiple of batch_size, so we truncate. if all_losses is not None: all_losses = all_losses[:num_samples] if all_preds is not None: all_preds = nested_truncate(all_preds, num_samples) if all_labels is not None: all_labels = nested_truncate(all_labels, num_samples) if all_inputs is not None: all_inputs = nested_truncate(all_inputs, num_samples) # Metrics! doc_labels = eval_dataset.doc_labels eval_samples = eval_dataset.samples split = eval_dataset.split if self.args.joint_train: doc_id_to_name = eval_dataset.id_to_name else: doc_id_to_name = None # allow_singletons = eval_dataset.data_args.allow_singletons assert all_preds is not None metrics = self.my_compute_metrics(doc_labels, all_preds, eval_samples, split, doc_id_to_name) # if all_preds is not None and doc_labels is not None: # metrics = self.get_eval_metrics(doc_labels, all_preds, # eval_samples, split) # else: # metrics = {} # To be JSON-serializable, we need to remove numpy types or zero-d tensors metrics = denumpify_detensorize(metrics) if all_losses is not None: metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item() # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) if self.args.gradient_checkpointing: self.model.config.use_cache = False return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples) def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: """ Perform an evaluation step on `model` using `inputs`. Subclass and override to inject custom behavior. Args: model (`nn.Module`): The model to evaluate. inputs (`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. prediction_loss_only (`bool`): Whether or not to return the loss only. ignore_keys: list of ignore keys Return: Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and labels (each being optional). 
""" if not self.args.predict_with_generate or prediction_loss_only: return super().prediction_step( model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys ) has_labels = "labels" in inputs inputs = self._prepare_inputs(inputs) # XXX: adapt synced_gpus for fairscale as well gen_kwargs = self._gen_kwargs.copy() gen_kwargs["max_length"] = ( gen_kwargs["max_length"] if gen_kwargs.get( "max_length") is not None else self.model.config.max_length ) gen_kwargs["num_beams"] = ( gen_kwargs["num_beams"] if gen_kwargs.get( "num_beams") is not None else self.model.config.num_beams ) default_synced_gpus = True if is_deepspeed_zero3_enabled() else False gen_kwargs["synced_gpus"] = ( gen_kwargs["synced_gpus"] if gen_kwargs.get( "synced_gpus") is not None else default_synced_gpus ) if "attention_mask" in inputs: gen_kwargs["attention_mask"] = inputs.get("attention_mask", None) if "global_attention_mask" in inputs: gen_kwargs["global_attention_mask"] = inputs.get( "global_attention_mask", None) # prepare generation inputs # some encoder-decoder models can have varying encoder's and thus # varying model input names if hasattr(self.model, "encoder") and self.model.encoder.main_input_name != self.model.main_input_name: generation_inputs = inputs[self.model.encoder.main_input_name] else: generation_inputs = inputs[self.model.main_input_name] # add our logits_processor here if self.args.seq2seq_type != 'short_seq': if self.args.action_type == 'non_integer': special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \ self.args.add_mention_end else NON_INT_SPECIAL_IDS gen_kwargs['logits_processor'] = LogitsProcessorList(
[NonIntProcessor(generation_inputs, special_ids,
11
2023-10-17 17:39:16+00:00
16k
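The cropped code in the record above ends by attaching a custom logits processor to gen_kwargs before generation. For reference, here is a minimal, hypothetical sketch of how such a constrained-decoding processor plugs into the Hugging Face LogitsProcessorList interface; AllowedIdsProcessor and allowed_ids are illustrative names only, not the NonIntProcessor defined in that repository.

import torch
from transformers import LogitsProcessor, LogitsProcessorList

class AllowedIdsProcessor(LogitsProcessor):
    """Illustrative only: force decoding to choose among a fixed set of token ids."""

    def __init__(self, allowed_ids):
        self.allowed_ids = list(allowed_ids)

    def __call__(self, input_ids, scores):
        # scores: (batch_size, vocab_size) next-token logits produced during generate().
        mask = torch.full_like(scores, float("-inf"))
        mask[:, self.allowed_ids] = 0.0  # keep the allowed ids, suppress everything else
        return scores + mask

# Usage sketch mirroring gen_kwargs["logits_processor"] in the record above:
# gen_kwargs["logits_processor"] = LogitsProcessorList([AllowedIdsProcessor(special_ids)])
# generated = model.generate(generation_inputs, **gen_kwargs)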
chenxn2020/GOSE
GOSEfinetune/models/LiLTRobertaLike/modeling_LiLTRobertaLike.py
[ { "identifier": "LiLTRobertaLikeConfig", "path": "GOSEfinetune/models/LiLTRobertaLike/configuration_LiLTRobertaLike.py", "snippet": "class LiLTRobertaLikeConfig(RobertaConfig):\n model_type = \"liltrobertalike\"\n\n def __init__(\n self,\n channel_shrink_ratio=4,\n max_2d_position_embeddings=1024,\n **kwargs\n ):\n super().__init__(\n **kwargs,\n )\n self.channel_shrink_ratio = channel_shrink_ratio\n self.max_2d_position_embeddings = max_2d_position_embeddings" }, { "identifier": "RE", "path": "GOSEfinetune/modules/decoders/RE.py", "snippet": "class RE(nn.Module):\n def __init__(self,args):\n super().__init__()\n self.cnt=0\n self.rounds = 5\n self.hidden_size = 960\n self.dim = self.hidden_size // 2\n self.hidden_dropout_prob = 0.5\n self.max_key = 64\n self.max_value = 64\n self.pooling_mode = 'max'\n self.softmax = nn.Softmax(dim=-1)\n self.loss_fct = CrossEntropyLoss()\n self.dropout = nn.Dropout(self.hidden_dropout_prob)\n\n self.output = nn.Linear(self.dim,2)\n self.k_up = nn.Linear(2,self.dim)\n self.v_up = nn.Linear(2,self.dim)\n\n self.type_token = nn.Parameter(torch.normal(0,0.0002,size=(1,self.hidden_size)))\n self.biaffine_type = BiaffineAttention(self.dim , 3)\n self.biaffine = BiaffineAttention(self.dim , 2)\n self.ffn = nn.Linear(2,self.dim)\n self.ffn_type = nn.Linear(3,self.dim)\n self.attn_type = Attention_logits(self.dim,max_len=self.max_key)\n self.attn = Attention_logits(self.dim,max_len=self.max_key)\n\n self.key_type_ffn = nn.Linear(self.hidden_size,self.dim )\n self.value_type_ffn = nn.Linear(self.hidden_size,self.dim )\n self.key_multi_ffn = nn.Linear(self.hidden_size,self.dim )\n self.value_multi_ffn = nn.Linear(self.hidden_size,self.dim )\n self.key_single_ffn = nn.Linear(self.hidden_size,self.dim )\n self.value_single_ffn = nn.Linear(self.hidden_size,self.dim )\n\n self.classifier = nn.Linear(self.dim * 2,2)\n \"\"\"\n self.text_biaffine = BiaffineAttention(self.hidden_size//2 , 2)\n \"\"\"\n def devide_entities(self,entities):\n \"\"\"\n devide entities into keys and values according there entities label\n return entities index\n \"\"\"\n entities_label_list = entities['label']\n key_index = [index for index,label in enumerate(entities_label_list) if label == 1]\n value_index = [index for index,label in enumerate(entities_label_list) if label == 2]\n\n key_num = len(key_index)\n value_num = len(value_index)\n\n M = self.max_key\n N = self.max_value \n\n if not key_num * value_num :\n key_index = [0]\n value_index = [1]\n \n if key_num > M :\n key_index = key_index[:M]\n if value_num > N:\n value_index = value_index[:N]\n\n return key_index, value_index \n\n def padding(self,data,N):\n # padding data 2,n,768 -> 2,N,768\n n = data.shape[0] \n dim = data.shape[1]\n device = data.device\n data = F.pad(data,(0,0,0,N-n))\n mask = torch.tensor([1.0]*n + [0.0]*(N-n),device=device)\n return data,mask \n\n def type_classifier(self,key,value,key_mask,value_mask):\n key = self.key_type_ffn(key)\n value = self.value_type_ffn(value)\n \n M = self.max_key\n N = self.max_value + 1\n logits_mask = key_mask.unsqueeze(2).repeat(1,1,N) * \\\n value_mask.unsqueeze(1).repeat(1,M,1) \n for i in range(self.rounds):\n \n logits = self.biaffine_type(key.unsqueeze(2).repeat(1,1,N,1),\n value.unsqueeze(1).repeat(1,M,1,1))\n if i < self.rounds-1:\n # B K V H\n logits = self.ffn_type(logits)\n logits = self.attn_type(logits,logits_mask)\n det_key,det_value = self.pooling(logits,key_mask,value_mask)\n key += det_key\n value += det_value \n else: \n logits = logits * 
logits_mask.unsqueeze(3).repeat(1,1,1,3)\n return logits \n \n def multi_classifier(self,key,value,key_mask,value_mask):\n key = self.key_multi_ffn(key)\n value = self.value_multi_ffn(value)\n\n M = key.shape[1]\n N = value.shape[1]\n \n key = key.unsqueeze(2).repeat(1,1,N,1)\n value = value.unsqueeze(1).repeat(1,M,1,1)\n\n multi_logits = self.classifier(torch.cat([key,value],dim=-1))\n\n return multi_logits \n \n def single_classifier(self,key,value,key_mask,value_mask):\n key = self.key_single_ffn(key)\n value = self.value_single_ffn(value)\n \n M = key.shape[1]\n N = value.shape[1]\n \n logits_mask = key_mask.unsqueeze(2).repeat(1,1,N) * \\\n value_mask.unsqueeze(1).repeat(1,M,1) \n \n for i in range(self.rounds):\n logits = self.biaffine(key.unsqueeze(2).repeat(1,1,N,1),\n value.unsqueeze(1).repeat(1,M,1,1))\n if i < self.rounds-1:\n # B K V H\n logits = self.ffn(logits)\n logits = self.attn(logits,logits_mask)\n det_key,det_value = self.pooling(logits,key_mask,value_mask)\n key += det_key\n value += det_value \n else: \n logits = logits * logits_mask.unsqueeze(3).repeat(1,1,1,2) \n\n return logits \n \n def forward(self, hidden_state, entities, relations, bbox):\n self.cnt+=1\n #layout_emb,text_emb = hidden_state \n B, max_len, H = hidden_state.shape\n device = hidden_state.device\n M = self.max_key\n N = self.max_value\n loss = 0\n all_pred_relations = []\n\n batch = []\n for b in range(B):\n if len(entities[b]['start']) <= 2:\n entities[b] = {\"end\":[1,1],\"label\":[0,0],\"start\":[0,0]}\n \n key_index,value_index = self.devide_entities(entities[b])\n start_token_index = torch.tensor(entities[b]['start'])\n key_start_token = start_token_index[key_index]\n value_start_token = start_token_index[value_index]\n #b,2,len,dim\n key = hidden_state[b][key_start_token,:]\n value = hidden_state[b][value_start_token,:]\n\n key,key_mask = self.padding(key,self.max_key)\n value = torch.cat([self.type_token,value],dim=0)\n value,value_mask = self.padding(value,self.max_value+1)\n\n batch.append((key,value,key_mask,value_mask))\n \n \n org_key = torch.stack([item[0] for item in batch],dim=0)\n org_value = torch.stack([item[1] for item in batch],dim=0)\n key_mask = torch.stack([item[2] for item in batch],dim=0)\n value_mask = torch.stack([item[3] for item in batch],dim=0)\n\n type_logits = self.type_classifier(org_key,org_value,key_mask,value_mask)\n \"\"\"\n self.type_token 0 - no link \n 1 - single link \n 2 - multilink\n B M N+1 3/\n \"\"\"\n \n org_value = org_value[:,1:,:]\n value_mask = value_mask[:,1:]\n\n type_token = self.softmax(type_logits[:,:,0])\n key_type = type_token.argmax(dim=-1)\n #so far we can get key label to route for downstream processing\n type_drop = key_type == 0\n type_single = key_type == 1\n type_multi = key_type == 2\n\n #multi_key = org_key[type_multi]\n multi_logits = self.multi_classifier(org_key,org_value,key_mask,value_mask)\n\n key_mask = key_mask.bool() & type_single\n single_logits = self.single_classifier(org_key,org_value,key_mask,value_mask)\n\n type_loss = self.get_type_loss(type_logits,key_mask,entities,relations)\n multi_loss = self.get_multi_loss(multi_logits,entities,relations)\n single_loss = self.get_single_loss(single_logits,entities,relations)\n\n loss = type_loss + multi_loss + single_loss \n all_pred_relations = self.get_predicted_relations(logits,entities,relations,key_mask,value_mask)\n\n return loss,all_pred_relations\n\n\n def pooling(self, table_logits, key_mask, value_mask):\n key_repr_list = []\n value_repr_list = []\n bs,_,_,_ = 
table_logits.shape\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n M = key_mask.shape[1]\n N = value_mask.shape[1]\n for b in range(bs):\n logit = table_logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n key_num, value_num, _ = logit.shape\n if self.pooling_mode == 'max':\n key_repr = logit.max(dim=1).values \n value_repr = logit.max(dim=0).values \n else:\n key_repr = logit.mean(dim=1)\n value_repr = logit.mean(dim=0)\n key_repr_list.append(F.pad(key_repr,(0, 0, 0, M - key_num)))\n value_repr_list.append(F.pad(value_repr,(0, 0, 0, N - value_num)))\n key_new = torch.stack(key_repr_list,dim=0) \n value_new = torch.stack(value_repr_list,dim=0)\n return key_new, value_new\n \n def get_type_loss(self,type_logits,key_mask,entities,relations):\n # logits 2,64,65,3\n logits = self.softmax(type_logits[:,:,0])\n B = logits.shape[0]\n device = logits.device\n key_mask = key_mask.bool()\n loss_fcn = CrossEntropyLoss()\n\n for b in range(B):\n logit = logits[b][key_mask[b]]\n\n from IPython import embed;embed()\n relations \n\n def get_loss(self,logits,entities,relations,key_mask,value_mask):\n loss_fcn = CrossEntropyLoss()\n B = logits.shape[0]\n device = logits.device\n loss = 0\n all_logits = []\n all_labels = []\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n for b in range(B):\n logit = logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n M,N,_ = logit.shape \n\n key_index,value_index = self.devide_entities(entities[b])\n key_list = relations[b]['head']\n value_list = relations[b]['tail']\n labels = torch.zeros(M*N,device=device).view(M,N)\n\n true_label = []\n for i in range(len(key_list)):\n try:\n key = key_index.index(key_list[i])\n value = value_index.index(value_list[i])\n labels[key][value] = 1\n except:\n continue\n \n labels = labels.view(-1).to(dtype=torch.long)\n logit = logit.view(M*N,-1).to(dtype=torch.float)\n \n\n all_logits.append(logit)\n all_labels.append(labels)\n\n all_logits = torch.cat(all_logits,0)\n all_labels = torch.cat(all_labels,0)\n\n loss = loss_fcn(all_logits+1e-10,all_labels)\n return loss \n \n def get_predicted_relations(self,logits,entities,relations,key_mask,value_mask):\n #from IPython import embed;embed()\n softmax = nn.Softmax(dim=-1)\n all_pred_relations = []\n \n device = logits.device\n B = logits.shape[0]\n \n\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n for b in range(B):\n pred_relations = []\n logit = logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n \n M,N,_ = logit.shape\n \n key_index,value_index = self.devide_entities(entities[b])\n for index in range(M*N):\n key = index // N\n value = index % N\n pred_label = logit[key][value].argmax(-1)\n\n if pred_label == 0:\n continue\n \n rel = {}\n rel[\"head_id\"] = key_index[key] \n rel[\"head\"] = (entities[b][\"start\"][rel[\"head_id\"]], entities[b][\"end\"][rel[\"head_id\"]])\n rel[\"head_type\"] = entities[b][\"label\"][rel[\"head_id\"]]\n\n rel[\"tail_id\"] = value_index[value]\n rel[\"tail\"] = (entities[b][\"start\"][rel[\"tail_id\"]], entities[b][\"end\"][rel[\"tail_id\"]])\n rel[\"tail_type\"] = entities[b][\"label\"][rel[\"tail_id\"]]\n rel[\"type\"] = 1\n \n pred_relations.append(rel)\n \n all_pred_relations.append(pred_relations)\n return all_pred_relations \n \n def get_loss_1(self,l_logits,t_logits,entities,relations,key_mask,value_mask):\n loss_fcn = CrossEntropyLoss()\n B = l_logits.shape[0]\n device = l_logits.device\n loss = 0\n all_layout_logits = []\n all_text_logits 
= []\n all_labels = []\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n for b in range(B):\n l_logit = l_logits[b][key_mask[b]]\n l_logit = l_logit[:,value_mask[b]]\n t_logit = t_logits[b][key_mask[b]]\n t_logit = t_logit[:,value_mask[b]]\n M,N,_ = l_logit.shape \n\n key_index,value_index = self.devide_entities(entities[b])\n key_list = relations[b]['head']\n value_list = relations[b]['tail']\n labels = torch.zeros(M*N,device=device).view(M,N)\n\n true_label = []\n for i in range(len(key_list)):\n try:\n key = key_index.index(key_list[i])\n value = value_index.index(value_list[i])\n labels[key][value] = 1\n true_label.append((key*N+value))\n except:\n continue\n \n labels = labels.view(-1).to(dtype=torch.long)\n layout_logit = l_logit.view(M*N,-1).to(dtype=torch.float)\n text_logit = t_logit.view(M*N,-1).to(dtype=torch.float)\n\n all_layout_logits.append(layout_logit)\n all_text_logits.append(text_logit)\n all_labels.append(labels)\n\n all_layout_logits = torch.cat(all_layout_logits,0)\n all_text_logits = torch.cat(all_text_logits,0)\n all_labels = torch.cat(all_labels,0)\n\n layout_loss = loss_fcn(all_layout_logits+1e-10,all_labels)\n text_loss = loss_fcn(all_text_logits+1e-10,all_labels)\n\n loss = 2*layout_loss + text_loss \n return loss \n \n def get_predicted_relations_1(self,l_logits,t_logits,entities,relations,key_mask,value_mask):\n #from IPython import embed;embed()\n softmax = nn.Softmax(dim=-1)\n all_pred_relations = []\n \n device = l_logits.device\n B = l_logits.shape[0]\n \n\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n for b in range(B):\n pred_relations = []\n l_logit = l_logits[b][key_mask[b]]\n l_logit = l_logit[:,value_mask[b]]\n t_logit = t_logits[b][key_mask[b]]\n t_logit = t_logit[:,value_mask[b]]\n M,N,_ = l_logit.shape\n \n key_index,value_index = self.devide_entities(entities[b])\n key_list = relations[b]['head']\n value_list = relations[b]['tail']\n labels = torch.zeros(M*N,device=device).view(M,N)\n\n for index in range(M*N):\n key = index // N\n value = index % N\n layout_pred_label = l_logit[key][value].argmax(-1)\n text_pred_label = t_logit[key][value].argmax(-1)\n\n if layout_pred_label * text_pred_label == 0:\n continue\n \n rel = {}\n rel[\"head_id\"] = key_index[key] \n rel[\"head\"] = (entities[b][\"start\"][rel[\"head_id\"]], entities[b][\"end\"][rel[\"head_id\"]])\n rel[\"head_type\"] = entities[b][\"label\"][rel[\"head_id\"]]\n\n rel[\"tail_id\"] = value_index[value]\n rel[\"tail\"] = (entities[b][\"start\"][rel[\"tail_id\"]], entities[b][\"end\"][rel[\"tail_id\"]])\n rel[\"tail_type\"] = entities[b][\"label\"][rel[\"tail_id\"]]\n rel[\"type\"] = 1\n \n pred_relations.append(rel)\n \n all_pred_relations.append(pred_relations)\n return all_pred_relations " }, { "identifier": "GOSE", "path": "GOSEfinetune/modules/decoders/gose.py", "snippet": "class GOSE(nn.Module):\n def __init__(self, args):\n super().__init__()\n #(rounds,num_heads)\n # self.rounds = 4\n self.args = args\n self.rounds = args.rounds+1\n self.norm = False\n if args.backbone_name == 'lilt':\n self.hidden_size = 960\n elif args.backbone_name == 'xlm':\n self.hidden_size = 768\n self.hidden_dropout_prob = 0.5\n #默认only-mean pooling\n self.pooling_mode = args.pooling_mode\n self.use_gam = args.use_gam\n self.loss_fct = CrossEntropyLoss()\n self.use_prefix = args.use_prefix\n #---对global-attention使用稀疏注意力\n self.use_global_mask = args.use_global_mask\n #--------\n self.use_gate = args.use_gate\n 
print(f\"**********************************Backbone: {args.backbone_name}****************************\")\n print(f\"**********************************Use_GAM: {self.use_gam}************************************\")\n print(f\"**********************************Use_Prefix: {self.use_prefix}********************************\")\n print(f\"**********************************Use_Gate: {self.use_gate}************************************\")\n # print(f\"**********************************Use_Global_Mask: {self.use_global_mask}**********************\")\n print(f\"**********************************Pooling_Mode: {self.pooling_mode}****************************\")\n print(f\"**********************************Iterative_Rounds: {self.rounds-1}****************************\")\n print(f\"**************************************************************\")\n print(f\"**********************************No_Iteration: {self.args.no_it}********************************\")\n print(f\"**********************************No_Global: {self.args.no_global}********************************\")\n print(f\"**********************************Window_size: {self.args.window_size}********************************\")\n # self.mode = 'only-mean'\n # self.mode = 'only-max'\n # self.mode = 'attn-max'\n\n\n \n self.dropout = nn.Dropout(self.hidden_dropout_prob)\n self.elu=nn.ELU()\n self.biaffine = BiaffineAttention(self.hidden_size//2 , 2)\n self.ffn = nn.Linear(2, self.hidden_size//2)\n self.ffn_key = nn.Linear(self.hidden_size, self.hidden_size//2)\n self.ffn_value = nn.Linear(self.hidden_size, self.hidden_size//2)\n\n # attention config\n self.dim = self.hidden_size //2\n self.num_heads = 1\n self.num_tokens = 8 # max_len = 8\n self.window_size = args.window_size # 8 # window_size * S = H \n self.qkv_bias = False\n self.drop = 0\n self.attn_drop = 0\n self.drop_path = 0\n self.max_len = args.max_len #64\n self.norm1 = nn.LayerNorm(self.dim)\n self.norm2 = nn.LayerNorm(self.dim)\n self.global_token_num = args.global_token_num\n print(f\"**********************************Global_token: {self.global_token_num}****************************\")\n self.global_token = nn.Parameter(torch.zeros(1, self.global_token_num, self.hidden_size //2))\n self.attn = Attention(self.dim,num_heads=self.num_heads, num_tokens=self.num_tokens, \n window_size=self.window_size,qkv_bias=self.qkv_bias, \n attn_drop=self.attn_drop, proj_drop=self.drop, args=args)\n\n self.cnt = 0\n self.loss_fcn = CrossEntropyLoss()\n self.normal = True\n self.dummy_vec = nn.Parameter(torch.Tensor(1, self.hidden_size//2))\n nn.init.normal_(self.dummy_vec)\n #----gate\n self.gru = GRU(self.hidden_size//2) \n #---layout-prefix-tuning\n self.axis_dis_fn = nn.Linear(1, self.hidden_size//12)\n self.axis_angle_fn = nn.Linear(1, self.hidden_size//12)\n \n def create_global_mask(self):\n global_mask = torch.zeros(self.global_token_num, self.max_len, self.max_len).cuda()\n step = self.num_tokens\n for idx in range(self.global_token_num):\n row_ids = idx // self.num_tokens\n column_ids = idx % self.num_tokens\n row_start = row_ids * step\n column_start = column_ids * step\n global_mask[idx, row_start:row_start+self.num_tokens,:] = 1\n global_mask[idx, :, column_start:column_start+self.num_tokens] = 1\n return global_mask\n \n def get_entities_kv_index_list(self, entities):\n\n M = self.max_len\n entities_label = entities['label']\n\n entities_key_index = [index for index,label in enumerate(entities_label) if label == 1 ]\n entities_value_index = [index for index,label in enumerate(entities_label) if 
label == 2 ] \n key_num, value_num = len(entities_key_index),len(entities_value_index)\n '''\n in re.py\n if len(all_possible_relations) == 0:\n all_possible_relations = set([(0, 1)])\n '''\n if key_num * value_num == 0:\n #print(\"all_possible_relations == 0\")\n entities_key_index = [0]\n entities_value_index = [1]\n if key_num > M :\n entities_key_index = entities_key_index[:M]\n self.normal = False\n if value_num > M :\n entities_value_index = entities_value_index[:M]\n self.normal = False\n\n return entities_key_index, entities_value_index\n\n \n def forward(self, hidden_state, entities,relations, bbox):\n #if self.cnt == 30: set the num + 1 which failed\n # from IPython import embed;embed()\n self.cnt += 1\n B ,_ ,H = hidden_state.shape\n M = self.max_len\n device = hidden_state.device\n\n loss = 0\n all_pred_relations = []\n\n # B len(entities)\n # entities_label = torch.stack([torch.tensor(dict['label']) for dict in entities],dim=0)\n # padding to max_len M 64\n \n key_repr_list = []\n value_repr_list = []\n key_mask_list = []\n value_mask_list = []\n key_bbox_list, value_bbox_list = [], []\n for b in range(B):\n #key_repr ~ N,H -> 64,H/2\n #value_repr ~ M,H -> 64,H/2\n if len(entities[b][\"start\"]) <= 2:\n entities[b] = {\"end\": [1, 1], \"label\": [0, 0], \"start\": [0, 0]}\n \n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n entities_first_token_index = torch.tensor(entities[b]['start'])\n \n entities_key_first_token_index = entities_first_token_index[entities_key_index]\n entities_value_first_token_index = entities_first_token_index[entities_value_index]\n key_repr = hidden_state[b][entities_key_first_token_index,:]\n value_repr = hidden_state[b][entities_value_first_token_index,:]\n \n key_num,value_num = key_repr.shape[0],value_repr.shape[0]\n # padding key_repr key_num,H -> max_len,H\n # generate mask shape like max_len,H\n \n key_mask_list.append(torch.tensor([[1.]] * key_num + [[0.]] * (M - key_num),device=device).repeat(1,H//2))\n value_mask_list.append(torch.tensor([[1.]] * value_num + [[0.]] * (M - value_num),device=device).repeat(1,H//2))\n # padding key_repr key_num,H -> max_len,H\n key_repr_list.append(F.pad(key_repr,(0, 0, 0, M - key_num)))\n value_repr_list.append(F.pad(value_repr,(0, 0, 0, M - value_num)))\n #----得到kv实体的bbox\n key_bbox = bbox[b][entities_key_first_token_index]\n value_bbox = bbox[b][entities_value_first_token_index]\n key_bbox_list.append(F.pad(key_bbox,(0, 0, 0, M - key_num)))\n value_bbox_list.append(F.pad(value_bbox,(0, 0, 0, M - value_num)))\n\n # batch max_len hidden_size\n key_repr = torch.stack(key_repr_list,dim=0) \n key_mask = torch.stack(key_mask_list,dim=0)\n \n value_repr = torch.stack(value_repr_list,dim=0)\n value_mask = torch.stack(value_mask_list,dim=0)\n \n\n #key_mask * value_mask -> table_mask B,M,H * B,M,H -> B M M H\n table_mask = key_mask.unsqueeze(2).repeat(1,1,M,1)\\\n *value_mask.unsqueeze(1).repeat(1,M,1,1)\n #---global_mask\n if self.use_global_mask:\n self.global_mask = self.create_global_mask()\n global_mask = self.global_mask.unsqueeze(0).repeat(B,1,1,1) #shape[bsz,global_token_num,M,M]\n # global_mask = global_mask.view(B, self.global_token_num, -1)\n else:\n global_mask = None\n \n \n key_mask = key_mask[:,:,0].bool()\n value_mask = value_mask[:,:,0].bool()\n key_ffn = self.ffn_key(key_repr)\n value_ffn = self.ffn_value(value_repr)\n \n if self.norm == True:\n key_ffn = self.norm1(key_repr)\n value_ffn = self.norm1(value_repr)\n global_token = self.global_token.expand(B, 
-1, -1)\n key_bbox = torch.stack(key_bbox_list, dim=0) \n value_bbox = torch.stack(value_bbox_list, dim=0) \n layout_repr = self.calc_layout(key_bbox, value_bbox)\n layout_repr = layout_repr * table_mask\n layout_repr = layout_repr.view(B,M*M,H//2)\n for i in range(self.rounds):\n '''\n method 1 with biaffine \n \n table_mask.shape B M M H/2 -> B M M H (M=64)\n table_logits.shape B M M H/2 -> B M M 2\n B M M 2 -> B M M H\n attention input B (64+1)*64 384\n table input 64 * 64 \n window_size 8\n token_num 64/8 * 64/8 = 64\n '''\n #key_ffn = self.ffn_key(key_repr)\n #value_ffn = self.ffn_value(value_repr)\n #key_ffn = self.ffn_key(key_ffn)\n #value_ffn = self.ffn_value(value_ffn)\n \n table_logits = self.biaffine(key_ffn.unsqueeze(2).repeat(1,1,M,1),\n value_ffn.unsqueeze(1).repeat(1,M,1,1))\n if i < self.rounds-1:\n table_logits = self.ffn(table_logits) * table_mask\n \n if self.use_gam:\n table_logits = table_logits.view(B,M*M,H//2)\n \n table_logits = torch.cat((global_token, table_logits), dim=1)\n if self.use_prefix:\n table_logits = self.attn(table_logits, M, M, table_mask, global_mask, layout_prefix=layout_repr, key_num=key_num, value_num=value_num)\n else:\n table_logits = self.attn(table_logits, M, M, table_mask, global_mask, layout_prefix=None)\n global_token_new = table_logits[:,:self.global_token_num,:]\n global_token = global_token + global_token_new\n table_logits = table_logits[:,self.global_token_num:,:]\n table_logits = table_logits.view(B,M,M,H//2)\n table_logits = table_logits * table_mask\n key_new, value_new = self.get_new_repr(table_logits, key_mask, value_mask)\n if self.norm == True:\n key_new = self.norm2(key_new)\n value_new = self.norm2(value_new)\n if self.use_gate:\n key_ffn = self.gru(key_ffn,key_new)\n value_ffn = self.gru(value_ffn,value_new)\n \n elif self.args.no_it:\n key_ffn = key_new\n value_ffn = value_new\n elif self.args.use_add:\n key_ffn = key_ffn + key_new\n value_ffn = value_ffn + value_new \n else:\n table_logits = table_logits * table_mask[:,:,:,:2]\n\n # table_logits M N 2\n # table_logits.unsqueeze(0)\n # batch_table_logits = table_logits if batch_table_logits == None else torch.cat((batch_table_logits,table_logits),dim=0)\n\n loss = self.get_loss(table_logits,entities,relations,key_mask,value_mask)\n all_pred_relations = self.get_predicted_relations(table_logits,entities,key_mask,value_mask, bbox)\n return loss,all_pred_relations\n \n def calc_layout(self, head_bbox, tail_bbox):\n bsz, num, _ = head_bbox.shape\n head_bbox = head_bbox.unsqueeze(2).repeat(1,1,num,1)\n tail_bbox = tail_bbox.unsqueeze(1).repeat(1,num,1,1)\n \n #-----中心点坐标特征\n head_bbox_center = torch.div(torch.cat(((head_bbox[:,:,:,0]+head_bbox[:,:,:,2]).view(-1,1), (head_bbox[:,:,:,1]+head_bbox[:,:,:,3]).view(-1,1)),dim=1), 2)\n tail_bbox_center = torch.div(torch.cat(((tail_bbox[:,:,:,0]+tail_bbox[:,:,:,2]).view(-1,1), (tail_bbox[:,:,:,1]+tail_bbox[:,:,:,3]).view(-1,1)),dim=1), 2)\n head_tail_center_dis, hea_tail_center_angle = self.axis_features(head_bbox_center, tail_bbox_center)\n head_tail_center_dis_feature = self.axis_dis_fn(head_tail_center_dis)\n head_tail_center_angle_feature = self.axis_angle_fn(hea_tail_center_angle)\n #-----左上点坐标特征\n head_bbox_left_top = torch.cat((head_bbox[:,:,:, 0].view(-1,1), head_bbox[:,:,:, 1].view(-1,1)), dim=1)\n tail_bbox_left_top = torch.cat((tail_bbox[:,:,:, 0].view(-1,1), tail_bbox[:,:,:, 1].view(-1,1)), dim=1)\n head_tail_lt_dis, hea_tail_lt_angle = self.axis_features(head_bbox_left_top, tail_bbox_left_top)\n head_tail_lt_dis_feature = 
self.axis_dis_fn(head_tail_lt_dis)\n hea_tail_lt_angle_feature = self.axis_angle_fn(hea_tail_lt_angle)\n #-----右下点坐标特征\n head_bbox_right_down = torch.cat((head_bbox[:,:,:, 2].view(-1,1), head_bbox[:,:,:, 3].view(-1,1)), dim=1)\n tail_bbox_right_down = torch.cat((tail_bbox[:,:,:, 2].view(-1,1), tail_bbox[:,:,:, 3].view(-1,1)), dim=1)\n head_tail_rd_dis, hea_tail_rd_angle = self.axis_features(head_bbox_right_down, tail_bbox_right_down)\n head_tail_rd_dis_feature = self.axis_dis_fn(head_tail_rd_dis)\n hea_tail_rd_angle_feature = self.axis_angle_fn(hea_tail_rd_angle)\n layout_repr = torch.cat(\n (head_tail_center_dis_feature, head_tail_center_angle_feature\n , head_tail_lt_dis_feature, hea_tail_lt_angle_feature\n , head_tail_rd_dis_feature, hea_tail_rd_angle_feature\n ),\n dim=-1\n )\n layout_repr = layout_repr.view(bsz, num, num, -1) \n return layout_repr\n \n \n \n def axis_features(self, tmp_bbox_1, tmp_bbox_2):\n tmp_bbox_distance = torch.pow(torch.sum(torch.pow(tmp_bbox_1 - tmp_bbox_2, 2), dim=1), 0.5) #欧氏距离\n tmp_bbox_distance = tmp_bbox_distance.view(-1, 1)\n ##########计算角度\n head_tail_x = tmp_bbox_1[:, 0] - tmp_bbox_2[:, 0]\n head_tail_y = tmp_bbox_1[:, 1] - tmp_bbox_2[:, 1]\n tmp_bbox_angle = torch.div(torch.atan2(head_tail_y, head_tail_x), 3.1416) #正切的角度\n tmp_bbox_angle = tmp_bbox_angle.view(-1, 1)\n return torch.div(tmp_bbox_distance, 1000), tmp_bbox_angle\n\n \n \n \n def get_new_repr(self, table_logits, key_mask, value_mask):\n key_repr_list = []\n value_repr_list = []\n bs,_,_,_ = table_logits.shape\n for b in range(bs):\n logit = table_logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n key_num, value_num, _ = logit.shape\n if self.pooling_mode == 'max':\n key_repr = logit.max(dim=1).values \n value_repr = logit.max(dim=0).values \n else:\n key_repr = logit.mean(dim=1)\n value_repr = logit.mean(dim=0)\n key_repr_list.append(F.pad(key_repr,(0, 0, 0, self.max_len - key_num)))\n value_repr_list.append(F.pad(value_repr,(0, 0, 0, self.max_len - value_num)))\n key_new = torch.stack(key_repr_list,dim=0) \n value_new = torch.stack(value_repr_list,dim=0)\n return key_new, value_new\n \n def get_predicted_relations(self, logists,entities,key_mask,value_mask,bbox):\n all_pred_relations = []\n #logits.shape B,M,N,2\n #here is one batch so no dim B\n B,N,M,_=logists.shape\n for b in range(B):\n\n pred_relations = []\n logist = logists[b][key_mask[b]]\n logist = logist[:,value_mask[b]]\n N,M,_ = logist.shape\n \n #---index指的是序列中的第几个实体\n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n \n # if len(entities_key_index) > 64 or len(entities_value_index) > 64:\n # from IPython import embed;embed();exit()\n \n for index in range(M*N):\n key = index // M\n value = index % M\n pred_label = logist[key][value].argmax(-1)\n\n if pred_label == 0:\n continue\n \n rel = {}\n rel[\"head_id\"] = entities_key_index[key]\n rel[\"head\"] = (entities[b][\"start\"][rel[\"head_id\"]], entities[b][\"end\"][rel[\"head_id\"]])\n rel[\"head_type\"] = entities[b][\"label\"][rel[\"head_id\"]]\n\n rel[\"tail_id\"] = entities_value_index[value]\n rel[\"tail\"] = (entities[b][\"start\"][rel[\"tail_id\"]], entities[b][\"end\"][rel[\"tail_id\"]])\n rel[\"tail_type\"] = entities[b][\"label\"][rel[\"tail_id\"]]\n rel[\"type\"] = 1\n key_bbox_left_top = bbox[b][entities[b][\"start\"][rel[\"head_id\"]]].tolist()[:2]\n value_bbox_left_top = bbox[b][entities[b][\"start\"][rel[\"tail_id\"]]].tolist()[:2]\n rel[\"link\"] = (tuple(key_bbox_left_top), tuple(value_bbox_left_top))\n 
#--------\n pred_relations.append(rel)\n all_pred_relations.append(pred_relations)\n \n return all_pred_relations\n \n \n def get_loss(self,logists,entities,relations,key_mask,value_mask):\n #mask B M M H\n device = logists.device\n loss = 0\n B = key_mask.shape[0]\n all_logits = []\n all_labels = []\n for b in range(B):\n # 64,64 -> N,M\n logist = logists[b][key_mask[b]]\n logist = logist[:,value_mask[b]]\n N,M,_ = logist.shape\n\n\n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n \n entities_key_list = relations[b]['head']\n entities_value_list = relations[b]['tail']\n\n labels = torch.zeros(N*M).to(device).view(N,M)\n \n for i in range(len(entities_key_list)):\n try:\n key = entities_key_index.index(entities_key_list[i])\n value = entities_value_index.index(entities_value_list[i])\n labels[key][value] = 1\n except:\n continue\n \n \n labels = labels.view(-1).to(dtype=torch.long)\n logist = logist.view(N*M,-1).to(dtype=torch.float)\n all_logits.append(logist)\n all_labels.append(labels)\n all_logits = torch.cat(all_logits, 0)\n all_labels = torch.cat(all_labels, 0)\n loss = self.loss_fcn(all_logits+1e-10, all_labels)\n if (torch.isnan(loss).sum().item() > 0):\n loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)\n \n return loss" }, { "identifier": "ReOutput", "path": "GOSEfinetune/utils.py", "snippet": "class ReOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n entities: Optional[Dict] = None\n relations: Optional[Dict] = None\n pred_relations: Optional[Dict] = None" } ]
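The GOSE decoder quoted in the context above iterates a biaffine pair scorer: each round scores every key-value pair into a table, pools the table back into per-entity updates, and gates those updates into the key and value representations before the final link classification. The sketch below is a deliberately simplified, hypothetical rendering of that loop; TinyIterativeDecoder, its dimensions, and the use of nn.Bilinear and nn.GRUCell as stand-ins for the biaffine layer and the GRU gate are assumptions for illustration, not the repository's classes.

import torch
import torch.nn as nn

class TinyIterativeDecoder(nn.Module):
    def __init__(self, dim=64, rounds=3):
        super().__init__()
        self.rounds = rounds
        self.pair_scorer = nn.Bilinear(dim, dim, dim)  # stand-in for the biaffine layer
        self.gate = nn.GRUCell(dim, dim)               # stand-in for the GRU-style gate
        self.classifier = nn.Linear(dim, 2)            # link / no-link per key-value pair

    def forward(self, keys, values):
        # keys: (M, dim) key-entity vectors; values: (N, dim) value-entity vectors
        M, N = keys.size(0), values.size(0)
        for _ in range(self.rounds):
            # Score every key-value pair into an (M, N, dim) table.
            table = self.pair_scorer(
                keys.unsqueeze(1).expand(M, N, -1).reshape(M * N, -1),
                values.unsqueeze(0).expand(M, N, -1).reshape(M * N, -1),
            ).view(M, N, -1)
            # Pool the table back into per-entity updates and gate them in.
            keys = self.gate(table.max(dim=1).values, keys)
            values = self.gate(table.max(dim=0).values, values)
        return self.classifier(table)  # (M, N, 2) pairwise link logits

# Example usage on random entity vectors:
# decoder = TinyIterativeDecoder()
# logits = decoder(torch.randn(5, 64), torch.randn(7, 64))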
import math import torch import torch.nn as nn import torch.utils.checkpoint import os from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.activations import ACT2FN, gelu from transformers.file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from transformers.modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.utils import logging from .configuration_LiLTRobertaLike import LiLTRobertaLikeConfig from dataclasses import dataclass from typing import Dict, Optional, Tuple from transformers.file_utils import ModelOutput from ...modules.decoders.RE import RE from ...modules.decoders.gose import GOSE from ...utils import ReOutput
12119
self.layer = nn.ModuleList([LiLTRobertaLikeLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0][0] layout_inputs = layer_outputs[0][1] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ), layout_inputs return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ), layout_inputs class LiLTRobertaLikePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class LiLTRobertaLikePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """
# coding=utf-8 logger = logging.get_logger(__name__) class LiLTRobertaLikeTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids( input_ids, self.padding_idx, past_key_values_length ).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings, position_ids def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class LiLTRobertaLikeLayoutEmbeddings(nn.Module): def __init__(self, config): super(LiLTRobertaLikeLayoutEmbeddings, self).__init__() self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.padding_idx = config.pad_token_id self.box_position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size//config.channel_shrink_ratio, padding_idx=self.padding_idx ) self.box_linear_embeddings = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size//config.channel_shrink_ratio) self.LayerNorm = nn.LayerNorm(config.hidden_size//config.channel_shrink_ratio, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, bbox=None, position_ids=None, ): try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as e: raise IndexError("The :obj:`bbox`coordinate values should be within 0-1000 range.") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) spatial_position_embeddings = torch.cat( [ left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings, ], dim=-1, ) spatial_position_embeddings = self.box_linear_embeddings(spatial_position_embeddings) box_position_embeddings = self.box_position_embeddings(position_ids) spatial_position_embeddings = spatial_position_embeddings + box_position_embeddings spatial_position_embeddings = self.LayerNorm(spatial_position_embeddings) spatial_position_embeddings = self.dropout(spatial_position_embeddings) return spatial_position_embeddings class LiLTRobertaLikeSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.layout_query = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio) self.layout_key = nn.Linear(config.hidden_size // config.channel_shrink_ratio, 
self.all_head_size // config.channel_shrink_ratio) self.layout_value = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder self.channel_shrink_ratio = config.channel_shrink_ratio def transpose_for_scores(self, x, r=1): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size//r) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): layout_value_layer = self.transpose_for_scores(self.layout_value(layout_inputs), r=self.channel_shrink_ratio) layout_key_layer = self.transpose_for_scores(self.layout_key(layout_inputs), r=self.channel_shrink_ratio) layout_query_layer = self.transpose_for_scores(self.layout_query(layout_inputs), r=self.channel_shrink_ratio) mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) layout_attention_scores = torch.matmul(layout_query_layer, layout_key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key tmp_attention_scores = attention_scores / math.sqrt(self.attention_head_size) tmp_layout_attention_scores = layout_attention_scores / math.sqrt(self.attention_head_size//self.channel_shrink_ratio) attention_scores = tmp_attention_scores + tmp_layout_attention_scores layout_attention_scores = tmp_layout_attention_scores + tmp_attention_scores if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) layout_attention_scores = layout_attention_scores + attention_mask # Normalize the attention scores to probabilities. layout_attention_probs = nn.Softmax(dim=-1)(layout_attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. layout_attention_probs = self.dropout(layout_attention_probs) # Mask heads if we want to if head_mask is not None: layout_attention_probs = layout_attention_probs * head_mask layout_context_layer = torch.matmul(layout_attention_probs, layout_value_layer) layout_context_layer = layout_context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = layout_context_layer.size()[:-2] + (self.all_head_size//self.channel_shrink_ratio,) layout_context_layer = layout_context_layer.view(*new_context_layer_shape) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = ((context_layer, layout_context_layer), attention_probs) if output_attentions else ((context_layer, layout_context_layer),) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class LiLTRobertaLikeSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LiLTRobertaLikeAttention(nn.Module): def __init__(self, config): super().__init__() self.self = LiLTRobertaLikeSelfAttention(config) self.output = LiLTRobertaLikeSelfOutput(config) self.pruned_heads = set() ori_hidden_size = config.hidden_size config.hidden_size = config.hidden_size // config.channel_shrink_ratio self.layout_output = LiLTRobertaLikeSelfOutput(config) config.hidden_size = ori_hidden_size def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, layout_inputs, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0][0], hidden_states) layout_attention_output = self.layout_output(self_outputs[0][1], layout_inputs) outputs = ((attention_output, layout_attention_output),) + self_outputs[1:] # add attentions if we output them return outputs class LiLTRobertaLikeIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class LiLTRobertaLikeOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, 
eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LiLTRobertaLikeLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LiLTRobertaLikeAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added" self.crossattention = LiLTRobertaLikeAttention(config) self.intermediate = LiLTRobertaLikeIntermediate(config) self.output = LiLTRobertaLikeOutput(config) ori_hidden_size = config.hidden_size ori_intermediate_size = config.intermediate_size config.hidden_size = config.hidden_size // config.channel_shrink_ratio config.intermediate_size = config.intermediate_size // config.channel_shrink_ratio self.layout_intermediate = LiLTRobertaLikeIntermediate(config) self.layout_output = LiLTRobertaLikeOutput(config) config.hidden_size = ori_hidden_size config.intermediate_size = ori_intermediate_size def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, layout_inputs, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0][0] layout_attention_output = self_attention_outputs[0][1] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: assert hasattr( self, "crossattention" ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`" # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) layout_layer_output = apply_chunking_to_forward( self.layout_feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, layout_attention_output ) outputs = ((layer_output, layout_layer_output),) + 
outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def layout_feed_forward_chunk(self, attention_output): intermediate_output = self.layout_intermediate(attention_output) layer_output = self.layout_output(intermediate_output, attention_output) return layer_output class LiLTRobertaLikeEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LiLTRobertaLikeLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0][0] layout_inputs = layer_outputs[0][1] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ), layout_inputs return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ), layout_inputs class LiLTRobertaLikePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class LiLTRobertaLikePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """
config_class = LiLTRobertaLikeConfig
0
2023-10-19 14:36:32+00:00
16k
BurgerBurgerBurger/AA
run.py
[ { "identifier": "add_args", "path": "args.py", "snippet": "def add_args(parser):\n parser.add_argument(\"--do_train\", action=\"store_true\")\n parser.add_argument(\"--data_dir\", default=\"./dataset/docred\", type=str)\n parser.add_argument(\"--transformer_type\", default=\"bert\", type=str)\n parser.add_argument(\"--model_name_or_path\", default=\"bert-base-cased\", type=str)\n\n parser.add_argument(\"--train_file\", default=\"train_annotated.json\", type=str)\n parser.add_argument(\"--dev_file\", default=\"dev.json\", type=str)\n parser.add_argument(\"--test_file\", default=\"dev.json\", type=str)\n parser.add_argument(\"--pred_file\", default=\"results.json\", type=str)\n parser.add_argument(\"--save_path\", default=\"\", type=str)\n parser.add_argument(\"--load_path\", default=\"\", type=str)\n parser.add_argument(\"--results_path\", default=\"\", type=str)\n parser.add_argument(\"--teacher_sig_path\", default=\"\", type=str)\n parser.add_argument(\"--save_attn\", action=\"store_true\", help=\"Whether store the evidence distribution or not\")\n\n # graph\n parser.add_argument(\"--attn_heads\", default=2, type=int, help=\"Attention heads\")\n parser.add_argument(\"--gcn_layers\", default=2, type=int, help=\"GCN layers\")\n parser.add_argument(\"--iters\", default=2, type=int, help=\"Iteration\")\n parser.add_argument(\"--use_graph\", action=\"store_true\", help=\"Use graph\")\n\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Pretrained config name or path if not the same as model_name\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\")\n parser.add_argument(\"--max_seq_length\", default=1024, type=int,\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\")\n\n parser.add_argument(\"--train_batch_size\", default=4, type=int,\n help=\"Batch size for training.\")\n parser.add_argument(\"--test_batch_size\", default=8, type=int,\n help=\"Batch size for testing.\")\n parser.add_argument(\"--eval_mode\", default=\"single\", type=str,\n choices=[\"single\", \"fushion\"], \n help=\"Single-pass evaluation or evaluation with inference-stage fusion.\")\n parser.add_argument(\"--gradient_accumulation_steps\", default=1, type=int,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--num_labels\", default=4, type=int,\n help=\"Max number of labels in prediction.\")\n parser.add_argument(\"--max_sent_num\", default=25, type=int,\n help=\"Max number of sentences in each document.\")\n parser.add_argument(\"--evi_thresh\", default=0.2, type=float,\n help=\"Evidence Threshold. \")\n parser.add_argument(\"--evi_lambda\", default=0.1, type=float,\n help=\"Weight of relation-agnostic evidence loss during training. \")\n parser.add_argument(\"--attn_lambda\", default=1.0, type=float,\n help=\"Weight of knowledge distillation loss for attentions during training. 
\")\n parser.add_argument(\"--lr_transformer\", default=5e-5, type=float,\n help=\"The initial learning rate for transformer.\")\n parser.add_argument(\"--lr_added\", default=1e-4, type=float,\n help=\"The initial learning rate for added modules.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-6, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--warmup_ratio\", default=0.06, type=float,\n help=\"Warm up ratio for Adam.\")\n parser.add_argument(\"--num_train_epochs\", default=30.0, type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--evaluation_steps\", default=-1, type=int,\n help=\"Number of training steps between evaluations.\")\n parser.add_argument(\"--seed\", type=int, default=66,\n help=\"random seed for initialization\")\n parser.add_argument(\"--num_class\", type=int, default=97,\n help=\"Number of relation types in dataset.\")\n\n return parser" }, { "identifier": "DocREModel", "path": "model.py", "snippet": "class DocREModel(nn.Module):\n\n def __init__(self, args, config, model, tokenizer,\n emb_size=768, block_size=64, num_labels=-1,\n max_sent_num=25, evi_thresh=0.2):\n super().__init__()\n self.config = config\n self.model = model\n self.tokenizer = tokenizer\n self.hidden_size = config.hidden_size\n\n self.loss_fnt = ATLoss()\n self.loss_fnt_evi = nn.KLDivLoss(reduction=\"batchmean\")\n\n self.head_extractor = nn.Linear(self.hidden_size * 2, emb_size)\n self.tail_extractor = nn.Linear(self.hidden_size * 2, emb_size)\n\n self.use_graph = args.use_graph\n if self.use_graph:\n self.head_extractor = nn.Linear(3 * config.hidden_size, emb_size)\n self.tail_extractor = nn.Linear(3 * config.hidden_size, emb_size)\n self.bilinear = nn.Linear(emb_size * block_size, config.num_labels)\n\n self.emb_size = emb_size\n self.block_size = block_size\n self.num_labels = num_labels\n self.total_labels = config.num_labels\n self.max_sent_num = max_sent_num\n self.evi_thresh = evi_thresh\n\n self.edges = ['self-loop', 'mention-anaphor', 'co-reference', 'inter-entity']\n\n if self.use_graph:\n self.graph_layers = nn.ModuleList(\n AttentionGCNLayer(self.edges, self.hidden_size, nhead=args.attn_heads, iters=args.gcn_layers) for _ in\n range(args.iters))\n\n def encode(self, input_ids, attention_mask):\n config = self.config\n if config.transformer_type == \"bert\":\n start_tokens = [config.cls_token_id]\n end_tokens = [config.sep_token_id]\n elif config.transformer_type == \"roberta\":\n start_tokens = [config.cls_token_id]\n end_tokens = [config.sep_token_id, config.sep_token_id]\n # process long documents.\n sequence_output, attention = process_long_input(self.model, input_ids, attention_mask, start_tokens, end_tokens)\n\n return sequence_output, attention\n\n def get_hrt(self, sequence_output, attention, entity_pos, hts, offset):\n n, h, _, c = attention.size()\n hss, tss, rss = [], [], []\n ht_atts = []\n\n for i in range(len(entity_pos)): # for each batch\n entity_embs, entity_atts = [], []\n\n # obtain entity embedding from mention embeddings.\n for eid, e in enumerate(entity_pos[i]): # for each entity\n if len(e) > 1:\n e_emb, e_att = [], []\n for mid, (start, end) in enumerate(e): # for every mention\n if start + offset < c:\n # In case the entity mention is truncated due to limited max seq length.\n e_emb.append(sequence_output[i, start + offset])\n e_att.append(attention[i, :, start + offset])\n\n if len(e_emb) > 
0:\n e_emb = torch.logsumexp(torch.stack(e_emb, dim=0), dim=0)\n e_att = torch.stack(e_att, dim=0).mean(0)\n else:\n e_emb = torch.zeros(self.config.hidden_size).to(sequence_output)\n e_att = torch.zeros(h, c).to(attention)\n else:\n start, end = e[0]\n if start + offset < c:\n e_emb = sequence_output[i, start + offset]\n e_att = attention[i, :, start + offset]\n else:\n e_emb = torch.zeros(self.config.hidden_size).to(sequence_output)\n e_att = torch.zeros(h, c).to(attention)\n\n entity_embs.append(e_emb)\n entity_atts.append(e_att)\n\n entity_embs = torch.stack(entity_embs, dim=0) # [n_e, d]\n entity_atts = torch.stack(entity_atts, dim=0) # [n_e, h, seq_len]\n\n ht_i = torch.LongTensor(hts[i]).to(sequence_output.device)\n\n # obtain subject/object (head/tail) embeddings from entity embeddings.\n hs = torch.index_select(entity_embs, 0, ht_i[:, 0])\n ts = torch.index_select(entity_embs, 0, ht_i[:, 1])\n\n h_att = torch.index_select(entity_atts, 0, ht_i[:, 0])\n t_att = torch.index_select(entity_atts, 0, ht_i[:, 1])\n\n ht_att = (h_att * t_att).mean(1) # average over all heads\n ht_att = ht_att / (ht_att.sum(1, keepdim=True) + 1e-30)\n ht_atts.append(ht_att)\n\n # obtain local context embeddings.\n rs = contract(\"ld,rl->rd\", sequence_output[i], ht_att)\n\n hss.append(hs)\n tss.append(ts)\n rss.append(rs)\n\n rels_per_batch = [len(b) for b in hss]\n hss = torch.cat(hss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n tss = torch.cat(tss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n rss = torch.cat(rss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n ht_atts = torch.cat(ht_atts, dim=0) # (num_ent_pairs_all_batches, max_doc_len)\n\n return hss, rss, tss, ht_atts, rels_per_batch\n\n def graph(self, sequence_output, graphs, attention, entity_pos, hts, offset):\n n, h, _, c = attention.size()\n\n max_node = max([graph.shape[0] for graph in graphs])\n graph_fea = torch.zeros(n, max_node, self.config.hidden_size, device=sequence_output.device)\n graph_adj = torch.zeros(n, max_node, max_node, device=sequence_output.device)\n\n for i, graph in enumerate(graphs):\n nodes_num = graph.shape[0]\n graph_adj[i, :nodes_num, :nodes_num] = torch.from_numpy(graph)\n\n for i in range(len(entity_pos)):\n mention_index = 0\n for e in entity_pos[i]:\n for start, end in e:\n if start + offset < c:\n # In case the entity mention is truncated due to limited max seq length.\n graph_fea[i, mention_index, :] = sequence_output[i, start + offset]\n else:\n graph_fea[i, mention_index, :] = torch.zeros(self.config.hidden_size).to(sequence_output)\n mention_index += 1\n\n for graph_layer in self.graph_layers:\n graph_fea, _ = graph_layer(graph_fea, graph_adj)\n\n h_entity, t_entity = [], []\n for i in range(len(entity_pos)):\n entity_embs = []\n mention_index = 0\n for e in entity_pos[i]:\n e_emb = graph_fea[i, mention_index:mention_index + len(e), :]\n mention_index += len(e)\n\n e_emb = torch.logsumexp(e_emb, dim=0) if len(e) > 1 else e_emb.squeeze(0)\n entity_embs.append(e_emb)\n\n entity_embs = torch.stack(entity_embs, dim=0)\n ht_i = torch.LongTensor(hts[i]).to(sequence_output.device)\n hs = torch.index_select(entity_embs, 0, ht_i[:, 0])\n ts = torch.index_select(entity_embs, 0, ht_i[:, 1])\n h_entity.append(hs)\n t_entity.append(ts)\n\n h_entity = torch.cat(h_entity, dim=0)\n t_entity = torch.cat(t_entity, dim=0)\n return h_entity, t_entity\n\n def forward_rel(self, hs, ts, rs, h, t):\n hs = torch.tanh(self.head_extractor(torch.cat([hs, rs, h], dim=-1)))\n ts = torch.tanh(self.tail_extractor(torch.cat([ts, rs, 
t], dim=-1)))\n # split into several groups.\n b1 = hs.view(-1, self.emb_size // self.block_size, self.block_size)\n b2 = ts.view(-1, self.emb_size // self.block_size, self.block_size)\n\n bl = (b1.unsqueeze(3) * b2.unsqueeze(2)).view(-1, self.emb_size * self.block_size)\n logits = self.bilinear(bl)\n\n return logits\n\n def forward_rel_no_graph(self, hs, ts, rs):\n hs = torch.tanh(self.head_extractor(torch.cat([hs, rs], dim=-1)))\n ts = torch.tanh(self.tail_extractor(torch.cat([ts, rs], dim=-1)))\n # split into several groups.\n b1 = hs.view(-1, self.emb_size // self.block_size, self.block_size)\n b2 = ts.view(-1, self.emb_size // self.block_size, self.block_size)\n\n bl = (b1.unsqueeze(3) * b2.unsqueeze(2)).view(-1, self.emb_size * self.block_size)\n logits = self.bilinear(bl)\n\n return logits\n\n def forward_evi(self, doc_attn, sent_pos, batch_rel, offset):\n max_sent_num = max([len(sent) for sent in sent_pos])\n rel_sent_attn = []\n for i in range(len(sent_pos)): # for each batch\n # the relation ids corresponds to document in batch i is [sum(batch_rel[:i]), sum(batch_rel[:i+1]))\n curr_attn = doc_attn[sum(batch_rel[:i]):sum(batch_rel[:i + 1])]\n curr_sent_pos = [torch.arange(s[0], s[1]).to(curr_attn.device) + offset for s in sent_pos[i]] # + offset\n\n curr_attn_per_sent = [curr_attn.index_select(-1, sent) for sent in curr_sent_pos]\n curr_attn_per_sent += [torch.zeros_like(curr_attn_per_sent[0])] * (max_sent_num - len(curr_attn_per_sent))\n sum_attn = torch.stack([attn.sum(dim=-1) for attn in curr_attn_per_sent],\n dim=-1) # sum across those attentions\n rel_sent_attn.append(sum_attn)\n\n s_attn = torch.cat(rel_sent_attn, dim=0)\n return s_attn\n\n def forward(self,\n input_ids=None,\n attention_mask=None,\n labels=None, # relation labels\n entity_pos=None,\n hts=None, # entity pairs\n sent_pos=None,\n sent_labels=None, # evidence labels (0/1)\n teacher_attns=None, # evidence distribution from teacher model\n graph=None,\n tag=\"train\"\n ):\n\n offset = 1 if self.config.transformer_type in [\"bert\", \"roberta\"] else 0\n output = {}\n sequence_output, attention = self.encode(input_ids, attention_mask)\n\n hs, rs, ts, doc_attn, batch_rel = self.get_hrt(sequence_output, attention, entity_pos, hts, offset)\n\n if self.use_graph:\n h, t = self.graph(sequence_output, graph, attention, entity_pos, hts, offset)\n logits = self.forward_rel(hs, ts, rs, h, t)\n else:\n logits = self.forward_rel_no_graph(hs, ts, rs)\n\n output[\"rel_pred\"] = self.loss_fnt.get_label(logits, num_labels=self.num_labels)\n\n if sent_labels is not None: # human-annotated evidence available\n\n s_attn = self.forward_evi(doc_attn, sent_pos, batch_rel, offset)\n output[\"evi_pred\"] = F.pad(s_attn > self.evi_thresh, (0, self.max_sent_num - s_attn.shape[-1]))\n\n if tag in [\"test\", \"dev\"]: # testing\n scores_topk = self.loss_fnt.get_score(logits, self.num_labels)\n output[\"scores\"] = scores_topk[0]\n output[\"topks\"] = scores_topk[1]\n\n if tag == \"infer\": # teacher model inference\n output[\"attns\"] = doc_attn.split(batch_rel)\n\n else: # training\n # relation extraction loss\n loss = self.loss_fnt(logits.float(), labels.float())\n output[\"loss\"] = {\"rel_loss\": loss.to(sequence_output)}\n\n if sent_labels is not None: # supervised training with human evidence\n\n idx_used = torch.nonzero(labels[:, 1:].sum(dim=-1)).view(-1)\n # evidence retrieval loss (kldiv loss)\n s_attn = s_attn[idx_used]\n sent_labels = sent_labels[idx_used]\n norm_s_labels = sent_labels / (sent_labels.sum(dim=-1, keepdim=True) + 
1e-30)\n norm_s_labels[norm_s_labels == 0] = 1e-30\n s_attn[s_attn == 0] = 1e-30\n evi_loss = self.loss_fnt_evi(s_attn.log(), norm_s_labels)\n output[\"loss\"][\"evi_loss\"] = evi_loss.to(sequence_output)\n\n elif teacher_attns is not None: # self training with teacher attention\n\n doc_attn[doc_attn == 0] = 1e-30\n teacher_attns[teacher_attns == 0] = 1e-30\n attn_loss = self.loss_fnt_evi(doc_attn.log(), teacher_attns)\n output[\"loss\"][\"attn_loss\"] = attn_loss.to(sequence_output)\n\n return output" }, { "identifier": "set_seed", "path": "utils.py", "snippet": "def set_seed(args):\n seed = int(args.seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\n torch.use_deterministic_algorithms(True)" }, { "identifier": "collate_fn", "path": "utils.py", "snippet": "def collate_fn(batch):\n max_len = max([len(f[\"input_ids\"]) for f in batch])\n max_sent = max([len(f[\"sent_pos\"]) for f in batch])\n input_ids = [f[\"input_ids\"] + [0] * (max_len - len(f[\"input_ids\"])) for f in batch]\n input_mask = [[1.0] * len(f[\"input_ids\"]) + [0.0] * (max_len - len(f[\"input_ids\"])) for f in batch]\n labels = [f[\"labels\"] for f in batch]\n entity_pos = [f[\"entity_pos\"] for f in batch]\n hts = [f[\"hts\"] for f in batch]\n sent_pos = [f[\"sent_pos\"] for f in batch]\n sent_labels = [f[\"sent_labels\"] for f in batch if \"sent_labels\" in f]\n attns = [f[\"attns\"] for f in batch if \"attns\" in f]\n\n input_ids = torch.tensor(input_ids, dtype=torch.long)\n input_mask = torch.tensor(input_mask, dtype=torch.float)\n\n labels = [torch.tensor(label) for label in labels]\n labels = torch.cat(labels, dim=0)\n\n if sent_labels != [] and None not in sent_labels:\n sent_labels_tensor = []\n for sent_label in sent_labels:\n sent_label = np.array(sent_label)\n sent_labels_tensor.append(np.pad(sent_label, ((0, 0), (0, max_sent - sent_label.shape[1]))))\n sent_labels_tensor = torch.from_numpy(np.concatenate(sent_labels_tensor, axis=0))\n else:\n sent_labels_tensor = None\n\n if attns:\n attns = [np.pad(attn, ((0, 0), (0, max_len - attn.shape[1]))) for attn in attns]\n attns = torch.from_numpy(np.concatenate(attns, axis=0))\n else:\n attns = None\n\n graph = [f[\"graph\"] for f in batch]\n\n output = (input_ids, input_mask, labels, entity_pos, hts, sent_pos, sent_labels_tensor, attns, graph)\n\n return output" }, { "identifier": "create_directory", "path": "utils.py", "snippet": "def create_directory(d):\n if d and not os.path.exists(d):\n os.makedirs(d)\n return d" }, { "identifier": "read_docred", "path": "prepro.py", "snippet": "def read_docred(file_in,\n tokenizer,\n transformer_type=\"bert\",\n max_seq_length=1024,\n teacher_sig_path=\"\",\n single_results=None):\n\n i_line = 0\n pos_samples = 0\n neg_samples = 0\n features = []\n\n if file_in == \"\":\n return None\n\n with open(file_in, \"r\", encoding='utf-8') as fh:\n data = json.load(fh)\n\n if teacher_sig_path != \"\": # load logits\n basename = os.path.splitext(os.path.basename(file_in))[0]\n attns_file = os.path.join(teacher_sig_path, f\"{basename}.attns\")\n attns = pickle.load(open(attns_file, 'rb'))\n\n if single_results != None:\n # reorder predictions as relations by title\n pred_pos_samples = 0\n pred_neg_samples = 0\n pred_rels = single_results\n title2preds = {}\n for pred_rel in 
pred_rels:\n if pred_rel[\"title\"] in title2preds:\n title2preds[pred_rel[\"title\"]].append(pred_rel)\n else:\n title2preds[pred_rel[\"title\"]] = [pred_rel]\n\n for doc_id in tqdm(range(len(data)), desc=\"Loading examples\"):\n\n sample = data[doc_id]\n entities = sample['vertexSet']\n entity_start, entity_end = [], []\n # record entities\n for entity in entities:\n for mention in entity:\n sent_id = mention[\"sent_id\"]\n pos = mention[\"pos\"]\n entity_start.append((sent_id, pos[0],))\n entity_end.append((sent_id, pos[1] - 1,))\n\n # add entity markers\n sents, sent_map, sent_pos = add_entity_markers(sample, tokenizer, entity_start, entity_end)\n\n # training triples with positive examples (entity pairs with labels)\n train_triple = {}\n\n if \"labels\" in sample:\n for label in sample['labels']:\n evidence = label['evidence']\n r = int(docred_rel2id[label['r']])\n\n # update training triples\n if (label['h'], label['t']) not in train_triple:\n train_triple[(label['h'], label['t'])] = [\n {'relation': r, 'evidence': evidence}]\n else:\n train_triple[(label['h'], label['t'])].append(\n {'relation': r, 'evidence': evidence})\n\n # get anaphors in the doc\n mentions = set([m['name'] for e in entities for m in e])\n\n potential_mention = get_anaphors(sample['sents'], mentions)\n\n entities.append(potential_mention)\n\n # entity start, end position\n entity_pos = []\n\n for e in entities:\n entity_pos.append([])\n for m in e:\n start = sent_map[m[\"sent_id\"]][m[\"pos\"][0]]\n end = sent_map[m[\"sent_id\"]][m[\"pos\"][1]]\n label = m[\"type\"]\n entity_pos[-1].append((start, end,))\n\n relations, hts, sent_labels = [], [], []\n\n for h, t in train_triple.keys(): # for every entity pair with gold relation\n relation = [0] * len(docred_rel2id)\n sent_evi = [0] * len(sent_pos)\n\n for mention in train_triple[h, t]: # for each relation mention with head h and tail t\n relation[mention[\"relation\"]] = 1\n for i in mention[\"evidence\"]:\n sent_evi[i] += 1\n\n relations.append(relation)\n hts.append([h, t])\n sent_labels.append(sent_evi)\n pos_samples += 1\n\n for h in range(len(entities) - 1):\n for t in range(len(entities) - 1):\n # all entity pairs that do not have relation are treated as negative samples\n if h != t and [h, t] not in hts: # and [t, h] not in hts:\n relation = [1] + [0] * (len(docred_rel2id) - 1)\n sent_evi = [0] * len(sent_pos)\n relations.append(relation)\n\n hts.append([h, t])\n sent_labels.append(sent_evi)\n neg_samples += 1\n\n graph = create_graph(entity_pos)\n\n assert len(relations) == (len(entities) - 1) * (len(entities) - 2)\n assert len(sents) < max_seq_length\n sents = sents[:max_seq_length - 2] # truncate, -2 for [CLS] and [SEP]\n input_ids = tokenizer.convert_tokens_to_ids(sents)\n input_ids = tokenizer.build_inputs_with_special_tokens(input_ids)\n\n feature = [{'input_ids': input_ids,\n 'entity_pos': entity_pos if entity_pos[-1] != [] else entity_pos[:-1],\n 'labels': relations,\n 'hts': hts,\n 'sent_pos': sent_pos,\n 'sent_labels': sent_labels,\n 'title': sample['title'],\n 'graph': graph\n }]\n\n if teacher_sig_path != '': # add evidence distributions from the teacher model\n feature[0]['attns'] = attns[doc_id][:, :len(input_ids)]\n\n if single_results is not None: # get pseudo documents from predictions of the single run\n offset = 1 if transformer_type in [\"bert\", \"roberta\"] else 0\n if sample[\"title\"] in title2preds:\n feature, pos_sample, neg_sample, = get_pseudo_features(feature[0], title2preds[sample[\"title\"]],\n entities, sent_map, offset, 
tokenizer)\n pred_pos_samples += pos_sample\n pred_neg_samples += neg_sample\n\n i_line += len(feature)\n features.extend(feature)\n\n print(\"# of documents {}.\".format(i_line))\n if single_results is not None:\n print(\"# of positive examples {}.\".format(pred_pos_samples))\n print(\"# of negative examples {}.\".format(pred_neg_samples))\n\n else:\n print(\"# of positive examples {}.\".format(pos_samples))\n print(\"# of negative examples {}.\".format(neg_samples))\n\n return features" }, { "identifier": "to_official", "path": "evaluation.py", "snippet": "def to_official(preds: list, features: list, evi_preds: list = [], scores: list = [], topks: list = []):\n '''\n Convert the predictions to official format for evaluating.\n Input:\n :preds: list of dictionaries, each dictionary entry is a predicted relation triple from the original document. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :features: list of features within each document. Identical to the lists obtained from pre-processing.\n :evi_preds: list of the evidence prediction corresponding to each relation triple prediction.\n :scores: list of scores of topk relation labels for each entity pair.\n :topks: list of topk relation labels for each entity pair.\n Output:\n :official_res: official results used for evaluation.\n :res: topk results to be dumped into file, which can be further used during fushion.\n '''\n\n h_idx, t_idx, title, sents = [], [], [], []\n\n for f in features:\n if \"entity_map\" in f:\n hts = [[f[\"entity_map\"][ht[0]], f[\"entity_map\"][ht[1]]] for ht in f[\"hts\"]]\n else:\n hts = f[\"hts\"]\n\n h_idx += [ht[0] for ht in hts]\n t_idx += [ht[1] for ht in hts]\n title += [f[\"title\"] for ht in hts]\n sents += [len(f[\"sent_pos\"])] * len(hts)\n\n official_res = []\n res = []\n\n for i in range(preds.shape[0]): # for each entity pair\n if scores != []:\n score = extract_relative_score(scores[i], topks[i])\n pred = topks[i]\n else:\n pred = preds[i]\n pred = np.nonzero(pred)[0].tolist()\n\n for p in pred: # for each predicted relation label (topk)\n curr_result = {\n 'title': title[i],\n 'h_idx': h_idx[i],\n 't_idx': t_idx[i],\n 'r': id2rel[p],\n }\n if evi_preds != []:\n curr_evi = evi_preds[i]\n evis = np.nonzero(curr_evi)[0].tolist()\n curr_result[\"evidence\"] = [evi for evi in evis if evi < sents[i]]\n if scores != []:\n curr_result[\"score\"] = score[np.where(topks[i] == p)].item()\n if p != 0 and p in np.nonzero(preds[i])[0].tolist():\n official_res.append(curr_result)\n res.append(curr_result)\n\n return official_res, res" }, { "identifier": "official_evaluate", "path": "evaluation.py", "snippet": "def official_evaluate(tmp, path, train_file=\"train_annotated.json\", dev_file=\"dev.json\"):\n '''\n Adapted from the official evaluation code\n '''\n truth_dir = os.path.join(path, 'ref')\n\n if not os.path.exists(truth_dir):\n os.makedirs(truth_dir)\n\n fact_in_train_annotated = gen_train_facts(os.path.join(path, train_file), truth_dir)\n fact_in_train_distant = gen_train_facts(os.path.join(path, \"train_distant.json\"), truth_dir)\n\n truth = json.load(open(os.path.join(path, dev_file)))\n\n std = {}\n tot_evidences = 0\n titleset = set([])\n\n title2vectexSet = {}\n\n for x in truth:\n title = x['title']\n titleset.add(title)\n\n vertexSet = x['vertexSet']\n title2vectexSet[title] = vertexSet\n\n if 'labels' not in x: # official test set from DocRED\n continue\n\n for label in x['labels']:\n r = label['r']\n h_idx = label['h']\n t_idx = label['t']\n std[(title, r, h_idx, t_idx)] = 
set(label['evidence'])\n tot_evidences += len(label['evidence'])\n\n tot_relations = len(std)\n tmp.sort(key=lambda x: (x['title'], x['h_idx'], x['t_idx'], x['r']))\n submission_answer = [tmp[0]]\n\n for i in range(1, len(tmp)):\n x = tmp[i]\n y = tmp[i - 1]\n if (x['title'], x['h_idx'], x['t_idx'], x['r']) != (y['title'], y['h_idx'], y['t_idx'], y['r']):\n submission_answer.append(tmp[i])\n\n correct_re = 0\n correct_evidence = 0\n pred_evi = 0\n\n correct_in_train_annotated = 0\n correct_in_train_distant = 0\n titleset2 = set([])\n for x in submission_answer:\n title = x['title']\n h_idx = x['h_idx']\n t_idx = x['t_idx']\n r = x['r']\n titleset2.add(title)\n if title not in title2vectexSet:\n continue\n vertexSet = title2vectexSet[title]\n\n if 'evidence' in x: # and (title, h_idx, t_idx) in std:\n evi = set(x['evidence'])\n else:\n evi = set([])\n pred_evi += len(evi)\n\n if (title, r, h_idx, t_idx) in std:\n correct_re += 1\n stdevi = std[(title, r, h_idx, t_idx)]\n correct_evidence += len(stdevi & evi)\n in_train_annotated = in_train_distant = False\n for n1 in vertexSet[h_idx]:\n for n2 in vertexSet[t_idx]:\n if (n1['name'], n2['name'], r) in fact_in_train_annotated:\n in_train_annotated = True\n if (n1['name'], n2['name'], r) in fact_in_train_distant:\n in_train_distant = True\n\n if in_train_annotated:\n correct_in_train_annotated += 1\n if in_train_distant:\n correct_in_train_distant += 1\n\n re_p = 1.0 * correct_re / len(submission_answer)\n re_r = 1.0 * correct_re / tot_relations if tot_relations != 0 else 0\n if re_p + re_r == 0:\n re_f1 = 0\n else:\n re_f1 = 2.0 * re_p * re_r / (re_p + re_r)\n\n evi_p = 1.0 * correct_evidence / pred_evi if pred_evi > 0 else 0\n evi_r = 1.0 * correct_evidence / tot_evidences if tot_evidences > 0 else 0\n\n if evi_p + evi_r == 0:\n evi_f1 = 0\n else:\n evi_f1 = 2.0 * evi_p * evi_r / (evi_p + evi_r)\n\n re_p_ignore_train_annotated = 1.0 * (correct_re - correct_in_train_annotated) / (\n len(submission_answer) - correct_in_train_annotated + 1e-5)\n re_p_ignore_train = 1.0 * (correct_re - correct_in_train_distant) / (\n len(submission_answer) - correct_in_train_distant + 1e-5)\n\n if re_p_ignore_train_annotated + re_r == 0:\n re_f1_ignore_train_annotated = 0\n else:\n re_f1_ignore_train_annotated = 2.0 * re_p_ignore_train_annotated * re_r / (re_p_ignore_train_annotated + re_r)\n\n if re_p_ignore_train + re_r == 0:\n re_f1_ignore_train = 0\n else:\n re_f1_ignore_train = 2.0 * re_p_ignore_train * re_r / (re_p_ignore_train + re_r)\n\n return [re_p, re_r, re_f1], [evi_p, evi_r, evi_f1], \\\n [re_p_ignore_train_annotated, re_r, re_f1_ignore_train_annotated], \\\n [re_p_ignore_train, re_r, re_f1_ignore_train]" }, { "identifier": "merge_results", "path": "evaluation.py", "snippet": "def merge_results(pred: list, pred_pseudo: list, features: list, thresh: float = None):\n '''\n Merge relation predictions from the original document and psuedo documents.\n Input:\n :pred: list of dictionaries, each dictionary entry is a predicted relation triple from the original document. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :pred_pseudo: list of dictionaries, each dictionary entry is a predicted relation triple from pseudo documents. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :features: list of features within each document. Identical to the lists obtained from pre-processing.\n :thresh: threshold for selecting predictions.\n Output:\n :merged_res: list of merged relation predictions. 
Each relation prediction is a dictionay with keys (title, h_idx, t_idx, r).\n :thresh: threshold of selecting relation predictions.\n '''\n\n title2pred = get_title2pred(pred)\n title2pred_pseudo = get_title2pred(pred_pseudo)\n\n title2gt = get_title2gt(features)\n num_gt = sum([len(title2gt[t]) for t in title2gt])\n\n titles = list(title2pred.keys())\n cand = []\n merged_res = []\n correct, num_pred = 0, 0\n\n for t in titles:\n rels = title2pred[t]\n rels_pseudo = title2pred_pseudo[t] if t in title2pred_pseudo else {}\n\n union = set(rels.keys()) | set(rels_pseudo.keys())\n for r in union:\n if r in rels and r in rels_pseudo: # add those into predictions\n if rels[r] > 0 and rels_pseudo[r] > 0:\n merged_res.append({'title': t, 'h_idx': r[0], 't_idx': r[1], 'r': r[2]})\n num_pred += 1\n correct += r in title2gt[t]\n continue\n score = rels[r] + rels_pseudo[r]\n elif r in rels: # -10 for penalty\n score = rels[r] - 10\n elif r in rels_pseudo:\n score = rels_pseudo[r] - 10\n cand.append((r in title2gt[t], score, t, r[0], r[1], r[2]))\n\n if thresh != None:\n sorted_pred = sorted(cand, key=lambda x: x[1], reverse=True)\n last = min(filter(lambda x: x[1] > thresh, sorted_pred))\n until = sorted_pred.index(last)\n cand = sorted_pred[:until + 1]\n merged_res.extend([{'title': r[2], 'h_idx': r[3], 't_idx': r[4], 'r': r[5]} for r in cand])\n return merged_res, thresh\n\n if cand != []:\n thresh, cand = select_thresh(cand, num_gt, correct, num_pred)\n merged_res.extend([{'title': r[2], 'h_idx': r[3], 't_idx': r[4], 'r': r[5]} for r in cand])\n\n return merged_res, thresh" } ]
import argparse import os import numpy as np import torch import ujson as json import pandas as pd import pickle from torch.cuda.amp import GradScaler from torch.utils.data import DataLoader from transformers import AutoConfig, AutoModel, AutoTokenizer from transformers.optimization import AdamW, get_linear_schedule_with_warmup from args import add_args from model import DocREModel from utils import set_seed, collate_fn, create_directory from prepro import read_docred from evaluation import to_official, official_evaluate, merge_results from tqdm import tqdm
11,701
topks.append(outputs["topks"].cpu().numpy()) if "evi_pred" in outputs: # relation extraction and evidence extraction evi_pred = outputs["evi_pred"] evi_pred = evi_pred.cpu().numpy() evi_preds.append(evi_pred) if "attns" in outputs: # attention recorded attn = outputs["attns"] attns.extend([a.cpu().numpy() for a in attn]) preds = np.concatenate(preds, axis=0) if scores: scores = np.concatenate(scores, axis=0) topks = np.concatenate(topks, axis=0) if evi_preds: evi_preds = np.concatenate(evi_preds, axis=0) official_results, results = to_official(preds, features, evi_preds=evi_preds, scores=scores, topks=topks) if len(official_results) > 0: if tag == "test": best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file, args.test_file) else: best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file, args.dev_file) else: best_re = best_evi = best_re_ign = [-1, -1, -1] output = { tag + "_rel": [i * 100 for i in best_re], tag + "_rel_ign": [i * 100 for i in best_re_ign], tag + "_evi": [i * 100 for i in best_evi], } scores = {"dev_F1": best_re[-1] * 100, "dev_evi_F1": best_evi[-1] * 100, "dev_F1_ign": best_re_ign[-1] * 100} if args.save_attn: attns_path = os.path.join(args.load_path, f"{os.path.splitext(args.test_file)[0]}.attns") print(f"saving attentions into {attns_path} ...") with open(attns_path, "wb") as f: pickle.dump(attns, f) return scores, output, official_results, results def dump_to_file(offi: list, offi_path: str, scores: list, score_path: str, results: list = [], res_path: str = "", thresh: float = None): ''' dump scores and (top-k) predictions to file. ''' print(f"saving official predictions into {offi_path} ...") json.dump(offi, open(offi_path, "w")) print(f"saving evaluations into {score_path} ...") headers = ["precision", "recall", "F1"] scores_pd = pd.DataFrame.from_dict(scores, orient="index", columns=headers) print(scores_pd) scores_pd.to_csv(score_path, sep='\t') if len(results) != 0: assert res_path != "" print(f"saving topk results into {res_path} ...") json.dump(results, open(res_path, "w")) if thresh is not None: thresh_path = os.path.join(os.path.dirname(offi_path), "thresh") if not os.path.exists(thresh_path): print(f"saving threshold into {thresh_path} ...") json.dump(thresh, open(thresh_path, "w")) return def main(): parser = argparse.ArgumentParser() parser = add_args(parser) args = parser.parse_args() device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") args.n_gpu = torch.cuda.device_count() args.device = device config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=args.num_class, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, ) model = AutoModel.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) config.transformer_type = args.transformer_type set_seed(args) read = read_docred config.cls_token_id = tokenizer.cls_token_id config.sep_token_id = tokenizer.sep_token_id model = DocREModel(args, config, model, tokenizer, num_labels=args.num_labels, max_sent_num=args.max_sent_num, evi_thresh=args.evi_thresh) model.to(args.device) print('total parameters:', sum([np.prod(list(p.size())) for p in model.parameters() if p.requires_grad])) if args.load_path != "": # load model from existing checkpoint model_path = os.path.join(args.load_path, "best.ckpt") 
model.load_state_dict(torch.load(model_path)) if args.do_train: # Training
def load_input(batch, device, tag="dev"): input = {'input_ids': batch[0].to(device), 'attention_mask': batch[1].to(device), 'labels': batch[2].to(device), 'entity_pos': batch[3], 'hts': batch[4], 'sent_pos': batch[5], 'sent_labels': batch[6].to(device) if (not batch[6] is None) and (batch[7] is None) else None, 'teacher_attns': batch[7].to(device) if not batch[7] is None else None, 'graph': batch[8], 'tag': tag } return input def train(args, model, train_features, dev_features): def finetune(features, optimizer, num_epoch, num_steps): best_score = -1 train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True) train_iterator = range(int(num_epoch)) total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps) warmup_steps = int(total_steps * args.warmup_ratio) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps) scaler = GradScaler() print("Total steps: {}".format(total_steps)) print("Warmup steps: {}".format(warmup_steps)) for epoch in tqdm(train_iterator, desc='Train epoch'): for step, batch in enumerate(train_dataloader): model.zero_grad() optimizer.zero_grad() model.train() inputs = load_input(batch, args.device) outputs = model(**inputs) loss = [outputs["loss"]["rel_loss"]] if inputs["sent_labels"] is not None: loss.append(outputs["loss"]["evi_loss"] * args.evi_lambda) if inputs["teacher_attns"] is not None: loss.append(outputs["loss"]["attn_loss"] * args.attn_lambda) loss = sum(loss) / args.gradient_accumulation_steps scaler.scale(loss).backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scaler.step(optimizer) scaler.update() scheduler.step() model.zero_grad() num_steps += 1 if (step + 1) == len(train_dataloader) or ( args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0): dev_scores, dev_output, official_results, results = evaluate(args, model, dev_features, tag="dev") print(dev_output) if dev_scores["dev_F1_ign"] > best_score: best_score = dev_scores["dev_F1_ign"] best_offi_results = official_results best_results = results best_output = dev_output ckpt_file = os.path.join(args.save_path, "best.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) if epoch == train_iterator[-1]: # last epoch ckpt_file = os.path.join(args.save_path, "last.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) pred_file = os.path.join(args.save_path, args.pred_file) score_file = os.path.join(args.save_path, "scores.csv") results_file = os.path.join(args.save_path, f"topk_{args.pred_file}") dump_to_file(best_offi_results, pred_file, best_output, score_file, best_results, results_file) return num_steps new_layer = ["extractor", "bilinear", "graph"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": args.lr_added}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr_transformer, eps=args.adam_epsilon) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = 
DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds, evi_preds = [], [] scores, topks = [], [] attns = [] for batch in dataloader: model.eval() if args.save_attn: tag = "infer" inputs = load_input(batch, args.device, tag) with torch.no_grad(): outputs = model(**inputs) pred = outputs["rel_pred"] pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) if "scores" in outputs: scores.append(outputs["scores"].cpu().numpy()) topks.append(outputs["topks"].cpu().numpy()) if "evi_pred" in outputs: # relation extraction and evidence extraction evi_pred = outputs["evi_pred"] evi_pred = evi_pred.cpu().numpy() evi_preds.append(evi_pred) if "attns" in outputs: # attention recorded attn = outputs["attns"] attns.extend([a.cpu().numpy() for a in attn]) preds = np.concatenate(preds, axis=0) if scores: scores = np.concatenate(scores, axis=0) topks = np.concatenate(topks, axis=0) if evi_preds: evi_preds = np.concatenate(evi_preds, axis=0) official_results, results = to_official(preds, features, evi_preds=evi_preds, scores=scores, topks=topks) if len(official_results) > 0: if tag == "test": best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file, args.test_file) else: best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file, args.dev_file) else: best_re = best_evi = best_re_ign = [-1, -1, -1] output = { tag + "_rel": [i * 100 for i in best_re], tag + "_rel_ign": [i * 100 for i in best_re_ign], tag + "_evi": [i * 100 for i in best_evi], } scores = {"dev_F1": best_re[-1] * 100, "dev_evi_F1": best_evi[-1] * 100, "dev_F1_ign": best_re_ign[-1] * 100} if args.save_attn: attns_path = os.path.join(args.load_path, f"{os.path.splitext(args.test_file)[0]}.attns") print(f"saving attentions into {attns_path} ...") with open(attns_path, "wb") as f: pickle.dump(attns, f) return scores, output, official_results, results def dump_to_file(offi: list, offi_path: str, scores: list, score_path: str, results: list = [], res_path: str = "", thresh: float = None): ''' dump scores and (top-k) predictions to file. 
''' print(f"saving official predictions into {offi_path} ...") json.dump(offi, open(offi_path, "w")) print(f"saving evaluations into {score_path} ...") headers = ["precision", "recall", "F1"] scores_pd = pd.DataFrame.from_dict(scores, orient="index", columns=headers) print(scores_pd) scores_pd.to_csv(score_path, sep='\t') if len(results) != 0: assert res_path != "" print(f"saving topk results into {res_path} ...") json.dump(results, open(res_path, "w")) if thresh is not None: thresh_path = os.path.join(os.path.dirname(offi_path), "thresh") if not os.path.exists(thresh_path): print(f"saving threshold into {thresh_path} ...") json.dump(thresh, open(thresh_path, "w")) return def main(): parser = argparse.ArgumentParser() parser = add_args(parser) args = parser.parse_args() device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") args.n_gpu = torch.cuda.device_count() args.device = device config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=args.num_class, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, ) model = AutoModel.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) config.transformer_type = args.transformer_type set_seed(args) read = read_docred config.cls_token_id = tokenizer.cls_token_id config.sep_token_id = tokenizer.sep_token_id model = DocREModel(args, config, model, tokenizer, num_labels=args.num_labels, max_sent_num=args.max_sent_num, evi_thresh=args.evi_thresh) model.to(args.device) print('total parameters:', sum([np.prod(list(p.size())) for p in model.parameters() if p.requires_grad])) if args.load_path != "": # load model from existing checkpoint model_path = os.path.join(args.load_path, "best.ckpt") model.load_state_dict(torch.load(model_path)) if args.do_train: # Training
create_directory(args.save_path)
4
2023-10-20 05:53:25+00:00
16k
xingchenshanyao/YOLOP-E
lib/core/function.py
[ { "identifier": "ConfusionMatrix", "path": "lib/core/evaluate.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc=1, conf=0.25, iou_thres=0.45):\n nc = 10 # 20230904 nc是类别数\n self.matrix = np.zeros((nc + 1, nc + 1))\n # import pdb;pdb.set_trace()\n self.nc = nc # number of classes\n self.conf = conf\n self.iou_thres = iou_thres\n\n def process_batch(self, detections, labels):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n labels (Array[M, 5]), class, x1, y1, x2, y2\n Returns:\n None, updates confusion matrix accordingly\n \"\"\"\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = general.box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n # import pdb;pdb.set_trace()\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(np.int16)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[gc, detection_classes[m1[j]]] += 1 # correct\n else:\n # import pdb;pdb.set_trace()\n self.matrix[gc, self.nc] += 1 # background FP\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[self.nc, dc] += 1 # background FN\n\n def matrix(self):\n return self.matrix\n\n def plot(self, save_dir='', names=()):\n try:\n import seaborn as sn\n\n array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize\n array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)\n\n fig = plt.figure(figsize=(12, 9), tight_layout=True)\n sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size\n labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels\n sn.heatmap(array, annot=self.nc < 30, annot_kws={\"size\": 8}, cmap='Blues', fmt='.2f', square=True,\n xticklabels=names + ['background FN'] if labels else \"auto\",\n yticklabels=names + ['background FP'] if labels else \"auto\").set_facecolor((1, 1, 1))\n fig.axes[0].set_xlabel('True')\n fig.axes[0].set_ylabel('Predicted')\n fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n except Exception as e:\n pass\n\n def print(self):\n for i in range(self.nc + 1):\n print(' '.join(map(str, self.matrix[i])))" }, { "identifier": "SegmentationMetric", "path": "lib/core/evaluate.py", "snippet": "class SegmentationMetric(object):\n '''\n imgLabel [batch_size, height(144), width(256)]\n confusionMatrix [[0(TN),1(FP)],\n [2(FN),3(TP)]]\n '''\n def __init__(self, numClass):\n self.numClass = numClass\n self.confusionMatrix = np.zeros((self.numClass,)*2)\n\n def pixelAccuracy(self):\n # return all class overall pixel accuracy\n # acc = (TP + TN) / (TP + TN + FP + TN)\n acc = np.diag(self.confusionMatrix).sum() / self.confusionMatrix.sum()\n return acc\n \n def lineAccuracy(self):\n Acc = np.diag(self.confusionMatrix) / (self.confusionMatrix.sum(axis=1) + 
1e-12)\n return Acc[1]\n\n def classPixelAccuracy(self):\n # return each category pixel accuracy(A more accurate way to call it precision)\n # acc = (TP) / TP + FP\n classAcc = np.diag(self.confusionMatrix) / (self.confusionMatrix.sum(axis=0) + 1e-12)\n return classAcc\n\n def meanPixelAccuracy(self):\n classAcc = self.classPixelAccuracy()\n meanAcc = np.nanmean(classAcc)\n return meanAcc\n\n def meanIntersectionOverUnion(self):\n # Intersection = TP Union = TP + FP + FN\n # IoU = TP / (TP + FP + FN)\n intersection = np.diag(self.confusionMatrix)\n union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(self.confusionMatrix)\n IoU = intersection / union\n IoU[np.isnan(IoU)] = 0\n mIoU = np.nanmean(IoU)\n return mIoU\n \n def IntersectionOverUnion(self):\n intersection = np.diag(self.confusionMatrix)\n union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(self.confusionMatrix)\n IoU = intersection / union\n IoU[np.isnan(IoU)] = 0\n return IoU[1]\n\n def genConfusionMatrix(self, imgPredict, imgLabel):\n # remove classes from unlabeled pixels in gt image and predict\n # print(imgLabel.shape)\n mask = (imgLabel >= 0) & (imgLabel < self.numClass)\n label = self.numClass * imgLabel[mask] + imgPredict[mask]\n count = np.bincount(label, minlength=self.numClass**2)\n confusionMatrix = count.reshape(self.numClass, self.numClass)\n return confusionMatrix\n\n def Frequency_Weighted_Intersection_over_Union(self):\n # FWIOU = [(TP+FN)/(TP+FP+TN+FN)] *[TP / (TP + FP + FN)]\n freq = np.sum(self.confusionMatrix, axis=1) / np.sum(self.confusionMatrix)\n iu = np.diag(self.confusionMatrix) / (\n np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) -\n np.diag(self.confusionMatrix))\n FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()\n return FWIoU\n\n\n def addBatch(self, imgPredict, imgLabel):\n assert imgPredict.shape == imgLabel.shape\n self.confusionMatrix += self.genConfusionMatrix(imgPredict, imgLabel)\n\n def reset(self):\n self.confusionMatrix = np.zeros((self.numClass, self.numClass))" }, { "identifier": "non_max_suppression", "path": "lib/core/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):\n \"\"\"Performs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n detections with shape: nx6 (x1, y1, x2, y2, conf, cls)\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_det = 300 # maximum number of detections per image\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n 
# If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "check_img_size", "path": "lib/core/general.py", "snippet": "def check_img_size(img_size, s=32):\n # Verify img_size is a multiple of stride s\n new_size = make_divisible(img_size, int(s)) # ceil gs-multiple # new_size = 640\n if new_size != img_size:\n print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))\n return new_size" }, { "identifier": "scale_coords", "path": "lib/core/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "xyxy2xywh", "path": "lib/core/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "xywh2xyxy", "path": "lib/core/general.py", "snippet": "def xywh2xyxy(x):\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = torch.zeros_like(x) if 
isinstance(x, torch.Tensor) else np.zeros_like(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y\n return y" }, { "identifier": "box_iou", "path": "lib/core/general.py", "snippet": "def box_iou(box1, box2):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n def box_area(box):\n # box = 4xn\n return (box[2] - box[0]) * (box[3] - box[1]) #(x2-x1)*(y2-y1)\n\n area1 = box_area(box1.T)\n area2 = box_area(box2.T)\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)\n return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)" }, { "identifier": "coco80_to_coco91_class", "path": "lib/core/general.py", "snippet": "def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\n # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/\n # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\\n')\n # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\\n')\n # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco\n # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]\n return x" }, { "identifier": "plot_images", "path": "lib/core/general.py", "snippet": "def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):\n # Plot image grid with labels\n\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n\n # un-normalise\n if np.max(images[0]) <= 1:\n images *= 255\n\n tl = 3 # line thickness\n tf = max(tl - 1, 1) # font thickness\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n\n # Check if we should resize\n scale_factor = max_size / max(h, w)\n if scale_factor < 1:\n h = math.ceil(scale_factor * h)\n w = math.ceil(scale_factor * w)\n\n colors = color_list() # list of colors\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, img in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n\n block_x = int(w * (i // ns))\n block_y = int(h * (i % ns))\n\n img = img.transpose(1, 2, 0)\n if scale_factor < 1:\n img = cv2.resize(img, (w, h))\n\n mosaic[block_y:block_y + h, block_x:block_x + w, :] = img\n if len(targets) > 0:\n image_targets = targets[targets[:, 0] == i]\n boxes = xywh2xyxy(image_targets[:, 2:6]).T\n classes = image_targets[:, 1].astype('int')\n labels = 
image_targets.shape[1] == 6 # labels if no conf column\n conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale_factor < 1: # absolute coords need scale if image scales\n boxes *= scale_factor\n boxes[[0, 2]] += block_x\n boxes[[1, 3]] += block_y\n for j, box in enumerate(boxes.T):\n cls = int(classes[j])\n color = colors[cls % len(colors)]\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])\n plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)\n\n # Draw image filename labels\n if paths:\n label = Path(paths[i]).name[:40] # trim to 40 char\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,\n lineType=cv2.LINE_AA)\n\n # Image border\n cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)\n\n if fname:\n r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size\n mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)\n # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save\n Image.fromarray(mosaic).save(fname) # PIL save\n return mosaic" }, { "identifier": "ap_per_class", "path": "lib/core/general.py", "snippet": "def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n # Arguments\n tp: True positives (nparray, nx1 or nx10).\n conf: Objectness value from 0-1 (nparray).\n pred_cls: Predicted object classes (nparray).\n target_cls: True object classes (nparray).\n plot: Plot precision-recall curve at [email protected]\n save_dir: Plot save directory\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n\n # Sort by objectness\n i = np.argsort(-conf)\n tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n # Find unique classes\n unique_classes = np.unique(target_cls)\n\n # Create Precision-Recall curve and compute AP for each class\n px, py = np.linspace(0, 1, 1000), [] # for plotting\n pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898\n s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 
10 for mAP0.5...0.95)\n ap, p, r = np.zeros(s), np.zeros((unique_classes.shape[0], 1000)), np.zeros((unique_classes.shape[0], 1000))\n for ci, c in enumerate(unique_classes):\n i = pred_cls == c\n n_l = (target_cls == c).sum() # number of labels\n n_p = i.sum() # number of predictions\n\n if n_p == 0 or n_l == 0:\n continue\n else:\n # Accumulate FPs and TPs\n fpc = (1 - tp[i]).cumsum(0)\n tpc = tp[i].cumsum(0)\n\n # Recall\n recall = tpc / (n_l + 1e-16) # recall curve\n r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases\n\n # Precision\n precision = tpc / (tpc + fpc) # precision curve\n p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score\n # AP from recall-precision curve\n for j in range(tp.shape[1]):\n ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])\n if plot and (j == 0):\n py.append(np.interp(px, mrec, mpre)) # precision at [email protected]\n\n # Compute F1 score (harmonic mean of precision and recall)\n f1 = 2 * p * r / (p + r + 1e-16)\n i=r.mean(0).argmax()\n\n if plot:\n plot_pr_curve(px, py, ap, save_dir, names)\n\n return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')" }, { "identifier": "output_to_target", "path": "lib/core/general.py", "snippet": "def output_to_target(output):\n # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]\n targets = []\n for i, o in enumerate(output):\n for *box, conf, cls in o.cpu().numpy():\n targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])\n return np.array(targets)" }, { "identifier": "time_synchronized", "path": "lib/utils/utils.py", "snippet": "def time_synchronized():\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n return time.time()" }, { "identifier": "plot_img_and_mask", "path": "lib/utils/plot.py", "snippet": "def plot_img_and_mask(img, mask, index,epoch,save_dir):\r\n classes = mask.shape[2] if len(mask.shape) > 2 else 1\r\n fig, ax = plt.subplots(1, classes + 1)\r\n ax[0].set_title('Input image')\r\n ax[0].imshow(img)\r\n if classes > 1:\r\n for i in range(classes):\r\n ax[i+1].set_title(f'Output mask (class {i+1})')\r\n ax[i+1].imshow(mask[:, :, i])\r\n else:\r\n ax[1].set_title(f'Output mask')\r\n ax[1].imshow(mask)\r\n plt.xticks([]), plt.yticks([])\r\n # plt.show()\r\n plt.savefig(save_dir+\"/batch_{}_{}_seg.png\".format(epoch,index))\r" }, { "identifier": "plot_one_box", "path": "lib/utils/plot.py", "snippet": "def plot_one_box(x, img, color=None, label=None, line_thickness=None):\r\n # Plots one bounding box on image img 在图像上画一个检测框\r\n tl = line_thickness or round(0.0001 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness\r\n color = color or [random.randint(0, 255) for _ in range(3)]\r\n c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))\r\n cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)\r\n if label:\r\n tf = max(tl - 1, 1) # font thickness\r\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\r\n c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3\r\n cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled\r\n print(label)\r\n cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)\r" }, { "identifier": "show_seg_result", "path": "lib/utils/plot.py", "snippet": "def show_seg_result(img, result, index, epoch, save_dir=None, is_ll=False,palette=None,is_demo=False,is_gt=False):\r\n # img = mmcv.imread(img)\r\n # img = img.copy()\r\n # seg = result[0]\r\n if 
palette is None:\r\n palette = np.random.randint(\r\n 0, 255, size=(3, 3))\r\n palette[0] = [0, 0, 0]\r\n palette[1] = [0, 255, 0]\r\n palette[2] = [255, 0, 0]\r\n palette = np.array(palette)\r\n assert palette.shape[0] == 3 # len(classes)\r\n assert palette.shape[1] == 3\r\n assert len(palette.shape) == 2\r\n \r\n if not is_demo:\r\n color_seg = np.zeros((result.shape[0], result.shape[1], 3), dtype=np.uint8)\r\n for label, color in enumerate(palette):\r\n color_seg[result == label, :] = color\r\n else:\r\n color_area = np.zeros((result[0].shape[0], result[0].shape[1], 3), dtype=np.uint8)\r\n \r\n # for label, color in enumerate(palette):\r\n # color_area[result[0] == label, :] = color\r\n\r\n color_area[result[0] == 1] = [0, 255, 0]\r\n color_area[result[1] ==1] = [255, 0, 0]\r\n color_seg = color_area\r\n\r\n # convert to BGR\r\n color_seg = color_seg[..., ::-1]\r\n # print(color_seg.shape)\r\n color_mask = np.mean(color_seg, 2)\r\n img[color_mask != 0] = img[color_mask != 0] * 0.5 + color_seg[color_mask != 0] * 0.5\r\n # img = img * 0.5 + color_seg * 0.5\r\n img = img.astype(np.uint8)\r\n img = cv2.resize(img, (1280,720), interpolation=cv2.INTER_LINEAR)\r\n\r\n if not is_demo:\r\n if not is_gt:\r\n if not is_ll:\r\n cv2.imwrite(save_dir+\"/batch_{}_{}_da_segresult.png\".format(epoch,index), img)\r\n else:\r\n cv2.imwrite(save_dir+\"/batch_{}_{}_ll_segresult.png\".format(epoch,index), img)\r\n else:\r\n if not is_ll:\r\n cv2.imwrite(save_dir+\"/batch_{}_{}_da_seg_gt.png\".format(epoch,index), img)\r\n else:\r\n cv2.imwrite(save_dir+\"/batch_{}_{}_ll_seg_gt.png\".format(epoch,index), img) \r\n return img\r" } ]
import time
import torch
import numpy as np
import json
import random
import cv2
import os
import math
import wandb
from lib.core.evaluate import ConfusionMatrix,SegmentationMetric
from lib.core.general import non_max_suppression,check_img_size,scale_coords,xyxy2xywh,xywh2xyxy,box_iou,coco80_to_coco91_class,plot_images,ap_per_class,output_to_target
from lib.utils.utils import time_synchronized
from lib.utils import plot_img_and_mask,plot_one_box,show_seg_result
from threading import Thread
from PIL import Image
from torchvision import transforms
from pathlib import Path
from torch.cuda import amp
from tqdm import tqdm
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
11,711
ll_IoU_seg = AverageMeter() ll_mIoU_seg = AverageMeter() T_inf = AverageMeter() T_nms = AverageMeter() # switch to train mode model.eval() jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, target, paths, shapes) in tqdm(enumerate(val_loader), total=len(val_loader)): if not config.DEBUG: img = img.to(device, non_blocking=True) assign_target = [] for tgt in target: assign_target.append(tgt.to(device)) target = assign_target nb, _, height, width = img.shape #batch size, channel, height, width with torch.no_grad(): pad_w, pad_h = shapes[0][1][1] pad_w = int(pad_w) pad_h = int(pad_h) ratio = shapes[0][1][0][0] t = time_synchronized() det_out, da_seg_out, ll_seg_out= model(img) # 检测图片? t_inf = time_synchronized() - t if batch_i > 0: T_inf.update(t_inf/img.size(0),img.size(0)) inf_out,train_out = det_out #driving area segment evaluation # 可驾驶区域分割评估 _,da_predict=torch.max(da_seg_out, 1) _,da_gt=torch.max(target[1], 1) da_predict = da_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] da_gt = da_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] da_metric.reset() da_metric.addBatch(da_predict.cpu(), da_gt.cpu()) da_acc = da_metric.pixelAccuracy() da_IoU = da_metric.IntersectionOverUnion() da_mIoU = da_metric.meanIntersectionOverUnion() da_acc_seg.update(da_acc,img.size(0)) da_IoU_seg.update(da_IoU,img.size(0)) da_mIoU_seg.update(da_mIoU,img.size(0)) #lane line segment evaluation # 车道线分割评估 _,ll_predict=torch.max(ll_seg_out, 1) _,ll_gt=torch.max(target[2], 1) ll_predict = ll_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_gt = ll_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_metric.reset() ll_metric.addBatch(ll_predict.cpu(), ll_gt.cpu()) ll_acc = ll_metric.lineAccuracy() ll_IoU = ll_metric.IntersectionOverUnion() ll_mIoU = ll_metric.meanIntersectionOverUnion() ll_acc_seg.update(ll_acc,img.size(0)) ll_IoU_seg.update(ll_IoU,img.size(0)) ll_mIoU_seg.update(ll_mIoU,img.size(0)) total_loss, head_losses = criterion((train_out,da_seg_out, ll_seg_out), target, shapes,model) #Compute loss losses.update(total_loss.item(), img.size(0)) #NMS # 非极大值抑制 t = time_synchronized() target[0][:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [target[0][target[0][:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling output = non_max_suppression(inf_out, conf_thres= config.TEST.NMS_CONF_THRESHOLD, iou_thres=config.TEST.NMS_IOU_THRESHOLD, labels=lb) #output = non_max_suppression(inf_out, conf_thres=0.001, iou_thres=0.6) #output = non_max_suppression(inf_out, conf_thres=config.TEST.NMS_CONF_THRES, iou_thres=config.TEST.NMS_IOU_THRES) t_nms = time_synchronized() - t if batch_i > 0: T_nms.update(t_nms/img.size(0),img.size(0)) if config.TEST.PLOTS: if batch_i == 0: for i in range(test_batch_size): img_test = cv2.imread(paths[i]) da_seg_mask = da_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_seg_mask = torch.nn.functional.interpolate(da_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_seg_mask = torch.max(da_seg_mask, 1) da_gt_mask = target[1][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_gt_mask = torch.nn.functional.interpolate(da_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_gt_mask = torch.max(da_gt_mask, 1) da_seg_mask = da_seg_mask.int().squeeze().cpu().numpy() da_gt_mask = da_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_test1 = img_test.copy() _ = show_seg_result(img_test, da_seg_mask, 
i,epoch,save_dir) _ = show_seg_result(img_test1, da_gt_mask, i, epoch, save_dir, is_gt=True) img_ll = cv2.imread(paths[i]) ll_seg_mask = ll_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_seg_mask = torch.nn.functional.interpolate(ll_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_seg_mask = torch.max(ll_seg_mask, 1) ll_gt_mask = target[2][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_gt_mask = torch.nn.functional.interpolate(ll_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_gt_mask = torch.max(ll_gt_mask, 1) ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy() ll_gt_mask = ll_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_ll1 = img_ll.copy() _ = show_seg_result(img_ll, ll_seg_mask, i,epoch,save_dir, is_ll=True) _ = show_seg_result(img_ll1, ll_gt_mask, i, epoch, save_dir, is_ll=True, is_gt=True) img_det = cv2.imread(paths[i]) img_gt = img_det.copy() det = output[i].clone() if len(det):
id_dict_SDExpressway = { 0:'Car', 1:'Truck', 2:'Guidance Sign', 3:'Warning Sign', 4:'Pending Sign', 5:'Speed Limit Sign', 6:'Emergency Telephone Sign', 7:'Directional Sign', 8:'Straight Ahead Arrow', 9:'Straight or Right Turn Arrow'} def train(cfg, train_loader, model, criterion, optimizer, scaler, epoch, num_batch, num_warmup, writer_dict, logger, device, rank=-1): """ train for one epoch Inputs: - config: configurations - train_loader: loder for data - model: - criterion: (function) calculate all the loss, return total_loss, head_losses - writer_dict: outputs(2,) output[0] len:3, [1,3,32,32,85], [1,3,16,16,85], [1,3,8,8,85] output[1] len:1, [2,256,256] output[2] len:1, [2,256,256] target(2,) target[0] [1,n,5] target[1] [2,256,256] target[2] [2,256,256] Returns: None """ batch_time = AverageMeter() # batch_time = <lib.core.function.AverageMeter object at 0x7f0255618970> data_time = AverageMeter() # data_time = <lib.core.function.AverageMeter object at 0x7f025561a4f0> losses = AverageMeter() # losses = <lib.core.function.AverageMeter object at 0x7f02402e7cd0> # switch to train mode model.train() start = time.time() # start = 1688805138.6791408 for i, (input, target, paths, shapes) in enumerate(train_loader): # i=0 # target = [tensor([[0.0000e+00,...335e-01]]), tensor([[[[1., 1., 1...., 0.]]]]), tensor([[[[1., 1., 1...., 0.]]]])] # paths = ('/home/xingchen/Study...3225df.jpg', '/home/xingchen/Study...49926c.jpg', ...) # shapes = (((720, 1280), ((0.5, 0.5), (0.0, 12.0))), ((...), (...)), ...) intermediate = time.time() # intermediate = 1688805496.5324085 #print('tims:{}'.format(intermediate-start)) num_iter = i + num_batch * (epoch - 1) # num_iter = 0 # num_batch = 4375 if num_iter < num_warmup: # warm up lf = lambda x: ((1 + math.cos(x * math.pi / cfg.TRAIN.END_EPOCH)) / 2) * \ (1 - cfg.TRAIN.LRF) + cfg.TRAIN.LRF # cosine xi = [0, num_warmup] # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 # 偏置lr从0.1下降到lr0,所有其他lr从0.0上升到lr0 x['lr'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_BIASE_LR if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) if 'momentum' in x: x['momentum'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_MOMENTUM, cfg.TRAIN.MOMENTUM]) data_time.update(time.time() - start) if not cfg.DEBUG: input = input.to(device, non_blocking=True) assign_target = [] for tgt in target: assign_target.append(tgt.to(device)) target = assign_target with amp.autocast(enabled=device.type != 'cpu'): outputs = model(input) # outputs = [[tensor([[[[[ 8.8806e...ackward0>), tensor([[[[[ 4.6631e...ackward0>), tensor([[[[[ 1.4758e...ackward0>)], tensor([[[[0.5151, 0...ackward0>), tensor([[[[0.4868, 0...ackward0>)] total_loss, head_losses = criterion(outputs, target, shapes,model) # print(head_losses) # compute gradient and do update step optimizer.zero_grad() scaler.scale(total_loss).backward() scaler.step(optimizer) scaler.update() if rank in [-1, 0]: # measure accuracy and record loss losses.update(total_loss.item(), input.size(0)) # _, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(), # target.detach().cpu().numpy()) # acc.update(avg_acc, cnt) # measure elapsed time batch_time.update(time.time() - start) end = time.time() if i % cfg.PRINT_FREQ == 0: msg = 'Epoch: [{0}][{1}/{2}]\t' \ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \ 'Speed {speed:.1f} samples/s\t' \ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \ 'Loss {loss.val:.5f} 
({loss.avg:.5f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) # writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps'] = global_steps + 1 def validate(epoch,config, val_loader, val_dataset, model, criterion, output_dir, tb_log_dir, writer_dict=None, logger=None, device='cpu', rank=-1,nc = 1): """ validata Inputs: - config: configurations - train_loader: loder for data - model: - criterion: (function) calculate all the loss, return - writer_dict: Return: None """ # setting max_stride = 32 weights = None save_dir = output_dir + os.path.sep + 'visualization' # save_dir = 'runs/BddDataset/_2023-07-09-09-50/visualization' if not os.path.exists(save_dir): os.mkdir(save_dir) # print(save_dir) _, imgsz = [check_img_size(x, s=max_stride) for x in config.MODEL.IMAGE_SIZE] #imgsz is multiple of max_stride batch_size = config.TRAIN.BATCH_SIZE_PER_GPU * len(config.GPUS) # batch_size = 16 test_batch_size = config.TEST.BATCH_SIZE_PER_GPU * len(config.GPUS) # test_batch_size = 16 training = False is_coco = False #is coco dataset save_conf=False # save auto-label confidences verbose=False save_hybrid=False log_imgs,wandb = min(16,100), None nc = 10 #20230904 iouv = torch.linspace(0.5,0.95,10).to(device) #iou vector for [email protected]:0.95 niou = iouv.numel() # niou = 10 try: except ImportError: wandb = None log_imgs = 0 seen = 0 # import pdb;pdb.set_trace() confusion_matrix = ConfusionMatrix(nc=model.nc) #detector confusion matrix # confusion matrix 混合矩阵 da_metric = SegmentationMetric(config.num_seg_class) #segment confusion matrix ll_metric = SegmentationMetric(2) #segment confusion matrix # names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} # names = {'0':0} names = id_dict_SDExpressway #20230904 colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] # colors = [[191, 83, 111]] coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', '[email protected]:.95') # s = ' Class Images Targets P R [email protected] [email protected]:.95' p, r, f1, mp, mr, map50, map, t_inf, t_nms = 0., 0., 0., 0., 0., 0., 0., 0., 0. losses = AverageMeter() da_acc_seg = AverageMeter() da_IoU_seg = AverageMeter() da_mIoU_seg = AverageMeter() ll_acc_seg = AverageMeter() ll_IoU_seg = AverageMeter() ll_mIoU_seg = AverageMeter() T_inf = AverageMeter() T_nms = AverageMeter() # switch to train mode model.eval() jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, target, paths, shapes) in tqdm(enumerate(val_loader), total=len(val_loader)): if not config.DEBUG: img = img.to(device, non_blocking=True) assign_target = [] for tgt in target: assign_target.append(tgt.to(device)) target = assign_target nb, _, height, width = img.shape #batch size, channel, height, width with torch.no_grad(): pad_w, pad_h = shapes[0][1][1] pad_w = int(pad_w) pad_h = int(pad_h) ratio = shapes[0][1][0][0] t = time_synchronized() det_out, da_seg_out, ll_seg_out= model(img) # 检测图片? 
t_inf = time_synchronized() - t if batch_i > 0: T_inf.update(t_inf/img.size(0),img.size(0)) inf_out,train_out = det_out #driving area segment evaluation # 可驾驶区域分割评估 _,da_predict=torch.max(da_seg_out, 1) _,da_gt=torch.max(target[1], 1) da_predict = da_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] da_gt = da_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] da_metric.reset() da_metric.addBatch(da_predict.cpu(), da_gt.cpu()) da_acc = da_metric.pixelAccuracy() da_IoU = da_metric.IntersectionOverUnion() da_mIoU = da_metric.meanIntersectionOverUnion() da_acc_seg.update(da_acc,img.size(0)) da_IoU_seg.update(da_IoU,img.size(0)) da_mIoU_seg.update(da_mIoU,img.size(0)) #lane line segment evaluation # 车道线分割评估 _,ll_predict=torch.max(ll_seg_out, 1) _,ll_gt=torch.max(target[2], 1) ll_predict = ll_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_gt = ll_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_metric.reset() ll_metric.addBatch(ll_predict.cpu(), ll_gt.cpu()) ll_acc = ll_metric.lineAccuracy() ll_IoU = ll_metric.IntersectionOverUnion() ll_mIoU = ll_metric.meanIntersectionOverUnion() ll_acc_seg.update(ll_acc,img.size(0)) ll_IoU_seg.update(ll_IoU,img.size(0)) ll_mIoU_seg.update(ll_mIoU,img.size(0)) total_loss, head_losses = criterion((train_out,da_seg_out, ll_seg_out), target, shapes,model) #Compute loss losses.update(total_loss.item(), img.size(0)) #NMS # 非极大值抑制 t = time_synchronized() target[0][:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [target[0][target[0][:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling output = non_max_suppression(inf_out, conf_thres= config.TEST.NMS_CONF_THRESHOLD, iou_thres=config.TEST.NMS_IOU_THRESHOLD, labels=lb) #output = non_max_suppression(inf_out, conf_thres=0.001, iou_thres=0.6) #output = non_max_suppression(inf_out, conf_thres=config.TEST.NMS_CONF_THRES, iou_thres=config.TEST.NMS_IOU_THRES) t_nms = time_synchronized() - t if batch_i > 0: T_nms.update(t_nms/img.size(0),img.size(0)) if config.TEST.PLOTS: if batch_i == 0: for i in range(test_batch_size): img_test = cv2.imread(paths[i]) da_seg_mask = da_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_seg_mask = torch.nn.functional.interpolate(da_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_seg_mask = torch.max(da_seg_mask, 1) da_gt_mask = target[1][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_gt_mask = torch.nn.functional.interpolate(da_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_gt_mask = torch.max(da_gt_mask, 1) da_seg_mask = da_seg_mask.int().squeeze().cpu().numpy() da_gt_mask = da_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_test1 = img_test.copy() _ = show_seg_result(img_test, da_seg_mask, i,epoch,save_dir) _ = show_seg_result(img_test1, da_gt_mask, i, epoch, save_dir, is_gt=True) img_ll = cv2.imread(paths[i]) ll_seg_mask = ll_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_seg_mask = torch.nn.functional.interpolate(ll_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_seg_mask = torch.max(ll_seg_mask, 1) ll_gt_mask = target[2][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_gt_mask = torch.nn.functional.interpolate(ll_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_gt_mask = torch.max(ll_gt_mask, 1) ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy() ll_gt_mask = ll_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # 
plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_ll1 = img_ll.copy() _ = show_seg_result(img_ll, ll_seg_mask, i,epoch,save_dir, is_ll=True) _ = show_seg_result(img_ll1, ll_gt_mask, i, epoch, save_dir, is_ll=True, is_gt=True) img_det = cv2.imread(paths[i]) img_gt = img_det.copy() det = output[i].clone() if len(det):
det[:,:4] = scale_coords(img[i].shape[1:],det[:,:4],img_det.shape).round()
4
2023-10-24 02:08:25+00:00
16k
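For context on the record above: its SegmentationMetric snippet scores the drivable-area and lane-line masks through a per-class confusion matrix, and IoU is read off that matrix's diagonal. The short sketch below reproduces that arithmetic in isolation so the numbers reported by IntersectionOverUnion and meanIntersectionOverUnion are easy to verify by hand; the helper names and the 2-class toy masks are illustrative assumptions, not code taken from the repository in the record.

import numpy as np

# Sketch of the confusion-matrix-based IoU used by the SegmentationMetric snippet above.
# num_classes=2 and the toy masks are assumptions for illustration only.
def confusion_matrix(pred, label, num_classes):
    # Keep only pixels whose ground-truth label is a valid class index.
    mask = (label >= 0) & (label < num_classes)
    # Encode each (gt, pred) pair as a single integer and histogram the pairs.
    combined = num_classes * label[mask].astype(int) + pred[mask].astype(int)
    counts = np.bincount(combined, minlength=num_classes ** 2)
    return counts.reshape(num_classes, num_classes)  # rows = ground truth, cols = prediction

def iou_per_class(cm):
    intersection = np.diag(cm)                              # TP per class
    union = cm.sum(axis=1) + cm.sum(axis=0) - np.diag(cm)   # TP + FP + FN per class
    return intersection / np.maximum(union, 1e-12)          # avoid division by zero

pred  = np.array([[0, 1, 1], [0, 1, 0], [0, 0, 1]])
label = np.array([[0, 1, 1], [0, 0, 0], [0, 1, 1]])
cm = confusion_matrix(pred, label, num_classes=2)
iou = iou_per_class(cm)
print("IoU per class:", iou)      # index 1 corresponds to IntersectionOverUnion() in the snippet
print("mIoU:", np.nanmean(iou))   # corresponds to meanIntersectionOverUnion()

On the toy masks this yields a confusion matrix of [[4, 1], [1, 3]], so class-1 IoU is 3 / (4 + 4 - 3) = 0.6 and mIoU is about 0.63, matching what the snippet's per-batch reset/addBatch loop would report.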
giulio98/functional-diffusion-processes
src/functional_diffusion_processes/trainers/trainer.py
[ { "identifier": "AudioDataset", "path": "src/functional_diffusion_processes/datasets/audio_dataset.py", "snippet": "class AudioDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for defining audio datasets.\n\n This class serves as the foundation for defining datasets containing audio data.\n It includes methods for preprocessing, resizing, and normalizing audio data.\n Subclasses may override these methods to implement dataset-specific processing and resizing logic.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Initialize an AudioDataset instance.\n\n Args:\n data_config (DictConfig): Configuration for loading the dataset, including paths, audio properties, etc.\n split (str): Specifies which split of the dataset to load (e.g., 'train', 'validation', 'test').\n evaluation (bool, optional): Indicates whether the dataset is for evaluation purposes. Defaults to False.\n \"\"\"\n super().__init__(data_config, split, evaluation)\n\n @staticmethod\n def normalize_audio(audio_np: np.ndarray, sample_rate: int) -> np.ndarray:\n \"\"\"Normalize the amplitude of the audio data to a standard range.\n\n This method utilizes PyDub's effects module to perform audio normalization.\n\n Args:\n audio_np (np.ndarray): Audio data represented as a NumPy array.\n sample_rate (int): The sample rate of the audio data.\n\n Returns:\n np.ndarray: The normalized audio data as a NumPy array.\n \"\"\"\n # Convert numpy array to AudioSegment\n audio_segment = AudioSegment(audio_np.tobytes(), frame_rate=int(sample_rate), sample_width=2, channels=1)\n\n # Normalize with PyDub\n normalized_audio_segment = effects.normalize(audio_segment)\n\n # Convert back to numpy\n normalized_audio_np = np.array(normalized_audio_segment.get_array_of_samples())\n\n return normalized_audio_np\n\n def _resize_op(self, audio: tf.Tensor, size: int) -> tf.Tensor:\n \"\"\"Resize the input audio to a specified size and normalize its amplitude to the range [0, 1].\n\n If the audio length is less than the specified size, zero padding is applied to reach the desired size.\n If the audio length is greater, it is truncated to the specified size.\n\n Args:\n audio (tf.Tensor): Input audio data as a TensorFlow tensor.\n size (int): The target size for the audio data.\n\n Returns:\n tf.Tensor: The resized and normalized audio data as a TensorFlow tensor.\n \"\"\"\n # Normalize dataset\n pylogger.info(\"Normalizing audio...\")\n audio = tf.cast(audio, dtype=tf.int16)\n # Calculate current length of the audio\n pylogger.info(\"Resizing audio to size {}...\".format(size))\n audio_length = tf.shape(audio)[0]\n audio = tf.cond(\n audio_length < size,\n lambda: tf.concat([audio, tf.zeros(size - audio_length, dtype=audio.dtype)], axis=0),\n lambda: audio[:size],\n )\n audio_np = tf.numpy_function(self.normalize_audio, [audio, self.data_config.audio_sample_rate], tf.int16)\n audio = tf.convert_to_tensor(audio_np, dtype=tf.int16)\n audio = tf.cast(audio, dtype=tf.float32)\n pylogger.info(\"Converting audio to range [-1, 1]...\")\n max_intensity = self.data_config.audio_max_intensity\n audio = audio / max_intensity\n return audio\n\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Preprocess the input audio data.\n\n This method resizes the audio data to a specified size based on the dataset configuration and normalizes the amplitude to the range [-1, +1].\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input audio data and any associated metadata.\n\n 
Returns:\n Dict[str, Any]: A dictionary containing the preprocessed audio data and any associated metadata.\n \"\"\"\n pylogger.info(\"Preprocessing audios for split {}...\".format(self.split))\n audio = self._resize_op(\n audio=d[\"audio\"], size=int(self.data_config.audio_sample_rate * self.data_config.audio_max_duration)\n )\n audio = tf.reshape(\n tensor=audio,\n shape=(-1, self.data_config.output_size),\n )\n pylogger.info(\"Audio reshaped to shape {}...\".format(audio.shape))\n return dict(data=audio, label=d.get(\"label\", None))\n\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Postprocess the output audio data.\n\n This method applies the inverse of the preprocessing steps to revert the audio data to its original form.\n\n Args:\n batch_data (Any): A batch of audio data to postprocess.\n inverse_scaler (Callable): A function that applies the inverse of the preprocessing steps.\n\n Returns:\n Any: A batch of postprocessed audio data.\n \"\"\"\n max_intensity = self.data_config.audio_max_intensity\n batch_audio = inverse_scaler(batch_data)\n batch_audio = batch_audio * max_intensity\n batch_post_processed = tf.cast(batch_audio, tf.int16)\n audio_np = tf.numpy_function(\n self.normalize_audio, [batch_post_processed, self.data_config.audio_sample_rate], tf.int16\n )\n batch_post_processed = tf.convert_to_tensor(audio_np, dtype=tf.int16)\n return batch_post_processed" }, { "identifier": "ImageDataset", "path": "src/functional_diffusion_processes/datasets/image_dataset.py", "snippet": "class ImageDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for handling image datasets.\n\n Provides a structured way to load, preprocess, and post-process image data.\n This class can be extended to handle specific image datasets as required.\n\n Attributes:\n data_config (DictConfig): Configuration settings for loading the dataset.\n split (str): Specifies the dataset split to load ('train', 'val', 'test', etc.).\n evaluation (bool): Indicates if the dataset is used for evaluation.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Initializes the ImageDataset object with dataset configurations.\n\n Args:\n data_config (DictConfig): Configuration settings for loading the dataset.\n split (str): Specifies the dataset split to load ('train', 'val', 'test', etc.).\n evaluation (bool): Indicates if the dataset is used for evaluation.\n \"\"\"\n super().__init__(data_config, split, evaluation)\n\n @staticmethod\n def _resize_op(image: Any, size: int) -> Any:\n \"\"\"Resizes the input image to the specified size and normalizes its values to the range [0,1].\n\n Args:\n image (Any): A tensor representing the input image.\n size (int): The target size for each dimension of the output image.\n\n Returns:\n Any: A tensor representing the resized and normalized image.\n \"\"\"\n # convert to range [0,1]\n pylogger.info(\"Converting image to range [0,1]...\")\n image = tf.image.convert_image_dtype(image=image, dtype=tf.float32)\n\n # resize to size\n pylogger.info(\"Resizing image to size {}...\".format(size))\n\n image = tf.image.resize(images=image, size=[size, size])\n\n return image\n\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Preprocesses the input data by resizing, possibly flipping, and applying uniform dequantization.\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input data with keys 'image' and optionally 'label'.\n\n Returns:\n Dict[str, Any]: A dictionary 
containing the preprocessed data, with keys 'data' and optionally 'label'.\n \"\"\"\n image = self._resize_op(image=d[\"image\"], size=self.data_config.image_width_size)\n\n pylogger.info(\"Preprocessing images for split {}...\".format(self.split))\n\n if self.data_config.random_flip and not self.evaluation:\n pylogger.info(\"Applying random flips...\")\n image = tf.image.random_flip_left_right(image=image, seed=self.data_config.seed)\n\n if self.data_config.uniform_dequantization:\n pylogger.info(\"Applying uniform dequantization...\")\n image = (\n tf.random.uniform(shape=image.shape, dtype=tf.float32, seed=self.data_config.seed) + image * 255.0\n ) / 256.0\n\n image = tf.reshape(\n tensor=image,\n shape=(-1, self.data_config.output_size),\n )\n pylogger.info(\"Image reshaped to shape {}...\".format(image.shape))\n\n return dict(data=image, label=d.get(\"label\", None))\n\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Post-processes the output data by reverting the preprocessing steps.\n\n Args:\n batch_data (Any): A batch of data to postprocess.\n inverse_scaler (Callable): A function to invert the scaling applied to the data.\n\n Returns:\n Any: A batch of postprocessed data, arranged in a grid for visualization.\n \"\"\"\n batch_post_processed = make_grid_image(\n ndarray=process_images(images=batch_data),\n inverse_scaler=inverse_scaler,\n )\n return batch_post_processed" }, { "identifier": "BaseDataset", "path": "src/functional_diffusion_processes/datasets/base_dataset.py", "snippet": "class BaseDataset(abc.ABC):\n \"\"\"Abstract base class for defining datasets.\n\n Provides a template for loading, preprocessing, and iterating over datasets.\n It encapsulates common dataset configurations and operations while allowing for dataset-specific\n preprocessing and post-processing through abstract methods.\n\n Attributes:\n dataset_builder: A builder object for loading the dataset.\n data_config (DictConfig): Configuration parameters for the dataset.\n split (str): Specifies which split of the dataset to load, e.g., 'train', 'validation', or 'test'.\n evaluation (bool): Indicates whether the dataset is for evaluation purposes.\n dataset_options: Options for configuring the dataset pipeline.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Abstract base class for defining datasets.\n\n This class provides a skeleton for defining datasets, with abstract methods for\n preprocessing data, generating batches of data, and resizing images. 
Subclasses\n must implement these methods to define their specific datasets.\n\n Args:\n data_config (DictConfig): A dictionary-like object containing the configuration for\n loading the dataset.\n\n split (str): A string specifying which split of the dataset to load.\n\n evaluation (bool): A boolean specifying whether the dataset is for evaluation purposes.\n \"\"\"\n self.dataset_builder = None\n self.data_config = data_config\n self.split = split\n self.evaluation = evaluation\n self.dataset_options = tf.data.Options()\n self.dataset_options.experimental_optimization.map_parallelization = True\n self.dataset_options.experimental_threading.private_threadpool_size = 48\n self.dataset_options.experimental_threading.max_intra_op_parallelism = 1\n\n @abc.abstractmethod\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Abstract method for preprocessing input data.\n\n Subclasses should override this method to implement dataset-specific preprocessing.\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input data.\n\n Returns:\n Dict[str, Any]: A dictionary containing the preprocessed data.\n \"\"\"\n raise NotImplementedError(\"Subclasses must implement preprocess_fn method.\")\n\n @abc.abstractmethod\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Abstract method for postprocessing output data.\n\n Subclasses should override this method to implement dataset-specific post-processing.\n\n Args:\n batch_data (Any): A batch of data to postprocess.\n inverse_scaler (Callable): A function to inverse the scaling of the data.\n\n Returns:\n Any: A dictionary containing the postprocessed data.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the postprocess_fn method.\")\n\n def _generator(self) -> Iterator[Any]:\n \"\"\"Generate batches of preprocessed data.\n\n Loads the dataset, shuffles the data, applies preprocessing, and batches the data.\n Subclasses might override this method to implement dataset-specific batching logic.\n\n Returns:\n Iterator[Any]: An iterator that generates batches of preprocessed data.\n \"\"\"\n # load the dataset\n if isinstance(self.dataset_builder, tfds.core.DatasetBuilder):\n read_config = tfds.ReadConfig(options=self.dataset_options)\n if self.data_config.download:\n self.dataset_builder.download_and_prepare()\n ds = self.dataset_builder.as_dataset(\n split=self.split,\n shuffle_files=False,\n read_config=read_config,\n as_supervised=False,\n )\n else:\n ds = self.dataset_builder.with_options(options=self.dataset_options)\n\n ds = ds.shuffle(buffer_size=10000, seed=self.data_config.seed)\n\n # apply the preprocessing function to each element in the dataset\n ds = ds.map(map_func=self.preprocess_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # determine the batch size per device\n ds = ds.batch(batch_size=self.data_config.batch_size, drop_remainder=True)\n ds = ds.batch(batch_size=jax.device_count(), drop_remainder=True)\n\n ds = ds.repeat(count=100000 if not self.evaluation else 1)\n\n return iter(ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE))\n\n def __iter__(self) -> Iterator[Any]:\n \"\"\"Return an iterator that generates batches of preprocessed data.\n\n Calls the `_generator` method to obtain an iterator for generating preprocessed data batches.\n\n Returns:\n Iterator[Any]: An iterator that generates batches of preprocessed data.\n \"\"\"\n return self._generator()\n\n def __len__(self) -> int:\n \"\"\"Return the number of examples in the 
dataset.\n\n Obtains the total number of examples in the specified dataset split from the dataset builder's info attribute.\n\n Returns:\n int: The number of examples in the dataset.\n \"\"\"\n return self.dataset_builder.info.splits[self.split].num_examples" }, { "identifier": "Loss", "path": "src/functional_diffusion_processes/losses/base_loss.py", "snippet": "class Loss(abc.ABC):\n \"\"\"Abstract class representing a loss function.\n\n Provides a framework for defining custom loss functions by enforcing the implementation\n of `construct_loss_fn` method in any derived classes. This class holds a reference to\n a stochastic differential equation (SDE) object which is used to calculate the weight factor for the loss.\n\n Attributes:\n sde (SDE): The stochastic differential equation instance associated with this loss.\n \"\"\"\n\n def __init__(self, sde: SDE) -> None:\n \"\"\"Initializes the Loss instance with a given SDE.\n\n Args:\n sde (SDE): An SDE instance which might be used in the loss computation.\n \"\"\"\n self.sde = sde\n\n def construct_loss_fn(self, model: Any) -> Callable:\n \"\"\"Abstract method to construct a loss function for a given model.\n\n This method should be implemented by any derived class to define the loss\n computation specific to the type of loss being implemented.\n\n Args:\n model (Any): The model for which to construct the loss function.\n\n Returns:\n Callable: A callable representing the constructed loss function.\n\n Raises:\n NotImplementedError: If the method is not implemented by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the construct_loss_fn method.\")" }, { "identifier": "FIDMetric", "path": "src/functional_diffusion_processes/metrics/fid_metric.py", "snippet": "class FIDMetric:\n \"\"\"Class for computing the Frechet Inception Distance (FID) metric.\n\n This class facilitates the computation of the FID metric, which measures the similarity between two distributions of images.\n It precomputes features for the real dataset using a specified Inception feature extractor and provides methods to compute\n and store features for generated images, and to compute the FID and Inception Score (IS).\n\n Attributes:\n metric_config (DictConfig): Configuration parameters for the FID metric.\n feature_extractor (InceptionFeatureExtractor): Inception feature extractor for computing the FID metric.\n dataset (BaseDataset): Dataset object providing real samples for FID computation.\n generated_pools (list): List to store features of generated images.\n generated_logits (list): List to store logits of generated images.\n real_features (dict): Dictionary to store precomputed features of real dataset.\n \"\"\"\n\n def __init__(\n self,\n metric_config: DictConfig,\n feature_extractor: InceptionFeatureExtractor,\n dataset: BaseDataset,\n ) -> None:\n \"\"\"Initializes the FIDMetric class with specified configurations, feature extractor, and dataset.\n\n Args:\n metric_config (DictConfig): Configuration parameters for the FID metric.\n feature_extractor (InceptionFeatureExtractor): Inception feature extractor for computing the FID metric.\n dataset (BaseDataset): Dataset object providing real samples for FID computation.\n \"\"\"\n self.metric_config = metric_config\n self.feature_extractor = feature_extractor\n self.dataset = dataset\n self.generated_pools = []\n self.generated_logits = []\n try:\n self.real_features = load_dataset_stats(\n save_path=metric_config.real_features_path,\n 
dataset_name=metric_config.dataset_name,\n )\n except FileNotFoundError:\n self._precompute_features(\n dataset_name=metric_config.dataset_name,\n save_path=metric_config.real_features_path,\n )\n self.real_features = load_dataset_stats(\n save_path=metric_config.real_features_path,\n dataset_name=metric_config.dataset_name,\n )\n\n def _precompute_features(self, dataset_name: str, save_path: str) -> None:\n \"\"\"Precomputes and saves features for the real dataset.\n\n Args:\n dataset_name (str): Name of the dataset.\n save_path (str): Path where the computed features will be saved.\n \"\"\"\n tf.io.gfile.makedirs(path=save_path)\n\n tf.io.gfile.makedirs(os.path.join(save_path, f\"{dataset_name.lower()}_clean\"))\n\n # Use the feature extractor to compute features for the real dataset\n all_pools = self.feature_extractor.extract_features(\n dataset=self.dataset, save_path=save_path, dataset_name=dataset_name\n )\n\n # Save latent represents of the Inception network to disk or Google Cloud Storage\n filename = f\"{dataset_name.lower()}_stats.npz\"\n\n if jax.host_id() == 0:\n pylogger.info(\"Saving real dataset stats to: %s\" % os.path.join(save_path, filename))\n\n with tf.io.gfile.GFile(os.path.join(save_path, filename), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, pool_3=all_pools)\n f_out.write(io_buffer.getvalue())\n\n def compute_fid(self, eval_dir, num_sampling_round) -> Tuple[float, float]:\n \"\"\"Computes the FID and Inception Score (IS) for the generated and real images.\n\n Args:\n eval_dir (str): Directory path for evaluation.\n num_sampling_round (int): Number of sampling rounds.\n\n Returns:\n Tuple[float, float]: A tuple containing the FID and Inception Score.\n \"\"\"\n real_pools = self.real_features[\"pool_3\"]\n if not self.feature_extractor.inception_v3 and not self.feature_extractor.inception_v3 == \"lenet\":\n if len(self.generated_logits) == 0 or len(self.generated_pools) == 0:\n if jax.host_id() == 0:\n # Load all statistics that have been previously computed and saved for each host\n for host in range(jax.host_count()):\n stats = tf.io.gfile.glob(os.path.join(eval_dir, \"statistics_*.npz\"))\n wait_message = False\n while len(stats) < num_sampling_round:\n if not wait_message:\n print(\"Waiting for statistics on host %d\" % (host,))\n wait_message = True\n stats = tf.io.gfile.glob(os.path.join(eval_dir, \"statistics_*.npz\"))\n time.sleep(10)\n\n for stat_file in stats:\n with tf.io.gfile.GFile(stat_file, \"rb\") as fin:\n stat = np.load(fin)\n\n self.generated_pools.append(stat[\"pool_3\"])\n self.generated_logits.append(stat[\"logits\"])\n\n all_logits = np.concatenate(self.generated_logits, axis=0)[: self.metric_config.num_samples]\n inception_score = tfgan.eval.classifier_score_from_logits(logits=all_logits)\n else:\n inception_score = -1\n\n all_pools = np.concatenate(self.generated_pools, axis=0)[: self.metric_config.num_samples]\n\n fid = tfgan.eval.frechet_classifier_distance_from_activations(activations1=real_pools, activations2=all_pools)\n\n return fid, inception_score\n\n def compute_and_store_generated_features(self, images: Any, sample_dir: str, round_num: int) -> None:\n \"\"\"Computes features for the generated images and stores them in a specified directory.\n\n Args:\n images (Any): Tensor representing the generated images.\n sample_dir (str): Directory where the features will be stored.\n round_num (int): Round number in the training process.\n \"\"\"\n latents = 
self.feature_extractor.extract_features(images)\n\n self.generated_pools.append(latents[\"pool_3\"])\n\n gc.collect()\n\n if self.feature_extractor.model_name == \"inception\" or self.feature_extractor.inception_v3:\n self.generated_logits.append(latents[\"logits\"])\n with tf.io.gfile.GFile(os.path.join(sample_dir, f\"statistics_{round_num}.npz\"), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(\n io_buffer,\n pool_3=latents[\"pool_3\"],\n logits=latents[\"logits\"],\n )\n\n f_out.write(io_buffer.getvalue())\n\n elif self.feature_extractor.model_name == \"lenet\":\n with tf.io.gfile.GFile(os.path.join(sample_dir, f\"statistics_{round_num}.npz\"), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, pool_3=latents[\"pool_3\"])\n f_out.write(io_buffer.getvalue())" }, { "identifier": "Sampler", "path": "src/functional_diffusion_processes/samplers/base_sampler.py", "snippet": "class Sampler(abc.ABC):\n \"\"\"Abstract base class for creating sampler objects.\n\n This class serves as a template for creating sampler objects which are\n designed to generate samples of a stochastic process governed by a\n specified stochastic differential equation (SDE). The process of sampling\n is carried out by employing specified predictor and corrector methods.\n\n Attributes:\n predictor (Predictor): The predictor method to be used in the sampling process.\n corrector (Corrector): The corrector method to be used in the sampling process.\n sde (SDE): The stochastic differential equation governing the process to be sampled.\n sampler_config (DictConfig): Configuration settings for the sampler.\n\n Methods:\n make_sampler(predict_fn: Callable) -> Callable:\n Abstract method to create a sampling function based on the specified predictor,\n corrector, and SDE.\n \"\"\"\n\n def __init__(self, predictor: Predictor, corrector: Corrector, sde: SDE, sampler_config: DictConfig) -> None:\n \"\"\"Initializes the Sampler object with specified predictor, corrector, SDE, and configuration.\n\n Args:\n predictor (Predictor): The predictor method for the sampler.\n corrector (Corrector): The corrector method for the sampler.\n sde (SDE): The stochastic differential equation governing the process.\n sampler_config (DictConfig): Configuration settings for the sampler.\n \"\"\"\n super().__init__()\n self.predictor = predictor\n self.corrector = corrector\n self.sampler_config = sampler_config\n self.sde = sde\n\n def make_sampler(self, predict_fn: Callable, auxiliary_fn: Union[Any, Callable]) -> Callable:\n \"\"\"Abstract method to create a sampler function.\n\n This method is intended to be overridden by derived classes to provide\n specific implementations for creating a sampler function. 
The sampler\n function will utilize the specified predictor and corrector methods\n along with the provided SDE to generate samples of the stochastic process.\n\n Args:\n predict_fn (Callable): The model prediction function.\n auxiliary_fn (Callable): The auxiliary prediction function for the model.\n\n Returns:\n Callable: The constructed sampling function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the make_sampler method.\")" }, { "identifier": "SDE", "path": "src/functional_diffusion_processes/sdetools/base_sde.py", "snippet": "class SDE(abc.ABC):\n \"\"\"Abstract base class for representing Stochastic Differential Equations (SDEs).\n\n This class provides a structured way to define and work with SDEs, including computing\n Fourier transforms, discretizing the equations, and defining the drift and diffusion terms.\n\n Attributes:\n sde_config (DictConfig): Configuration object containing SDE settings.\n T (float): Total time duration.\n N (int): Number of time steps.\n eps (float): Small constant for numerical stability.\n is_unidimensional (bool): Flag indicating if the SDE is unidimensional.\n \"\"\"\n\n def __init__(self, sde_config: DictConfig) -> None:\n \"\"\"Initializes the SDE with the given configuration.\n\n Args:\n sde_config (DictConfig): Configuration object containing SDE settings.\n \"\"\"\n super().__init__()\n self.sde_config = sde_config\n self.T = self.sde_config.T\n self.N = self.sde_config.N\n self.eps = self.sde_config.eps\n self.is_unidimensional = True if len(self.sde_config.shape) == 1 else False\n\n def fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.fft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.fft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n def inverse_fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the inverse Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose inverse Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Inverse Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.ifft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.ifft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n @abc.abstractmethod\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Abstract method to compute the drift and diffusion terms of the SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the drift and diffusion terms of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the sde method.\")\n\n @abc.abstractmethod\n def marginal_prob(\n self,\n rng: PRNGKeyArray,\n x: jnp.ndarray,\n t: jnp.ndarray,\n t0: Optional[jnp.ndarray] = None,\n ) -> Tuple[Any, jnp.ndarray | Any]:\n \"\"\"Computes the marginal probability density at a given time.\n\n This is an abstract method that should be overridden by subclasses to\n compute the marginal probability density based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): State of the system.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[Any, jnp.ndarray | Any]: Marginal probability density at the given time.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the marginal_prob method.\")\n\n @abc.abstractmethod\n def diffuse(\n self, rng: PRNGKeyArray, x: jnp.ndarray, t: jnp.ndarray, t0: Optional[jnp.ndarray] = None\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Performs diffusion of the input from time t0 to time t.\n\n This is an abstract method that should be overridden by subclasses to\n implement the diffusion process based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): Input state.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Mean of the corrupted input and the corrupted input.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the diffuse method.\")\n\n @abc.abstractmethod\n def prior_sampling(\n self, rng: PRNGKeyArray, shape: Tuple[int, ...], t0: Optional[jnp.ndarray] = None\n ) -> jnp.ndarray:\n \"\"\"Generates a sample from the prior distribution of the SDE.\n\n This is an abstract method that should be overridden by subclasses to\n implement the prior sampling process based on the shape and initial time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the sample to be generated.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n jnp.ndarray: A sample from the prior distribution of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the prior_sampling method.\")\n\n @abc.abstractmethod\n def score_fn(\n self, y_corrupted: jnp.ndarray, y_reconstructed: jnp.ndarray, t: jnp.ndarray, rng: Optional[PRNGKeyArray] = None\n ) -> jnp.ndarray:\n \"\"\"Computes the score function based on the corrupted and reconstructed states.\n\n This is an abstract method that should be overridden by subclasses to\n compute the score function based on the state and time.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n y_reconstructed (jnp.ndarray): Reconstructed state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n\n Returns:\n jnp.ndarray: The score function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the score_fn method.\")\n\n @abc.abstractmethod\n def get_psm(self, t: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Power-Special-Matrix(PSM) used as a weighting factor for the loss.\n\n This is an abstract method that should be overridden by subclasses to\n compute the state-dependent diffusion matrix based on the time.\n\n Args:\n t (jnp.ndarray): Current time.\n\n Returns:\n jnp.ndarray: The state-dependent diffusion matrix.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_psm method.\")\n\n @abc.abstractmethod\n def get_reverse_noise(self, rng: PRNGKeyArray, shape: Tuple[int, ...]) -> jnp.ndarray:\n \"\"\"Generates noise for the reverse SDE.\n\n This is an abstract method that should be overridden by subclasses to\n generate reverse noise based on the shape.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the noise to be generated.\n\n Returns:\n jnp.ndarray: The reverse noise.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_reverse_noise method.\")\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the SDE into an iterative update rule.\n\n This method computes the discrete drift and diffusion terms based on the continuous SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the discrete drift and diffusion terms.\n \"\"\"\n dt = (self.T - self.eps) / self.N\n drift, diffusion = self.sde(y_corrupted, t, y_reconstructed)\n f = drift * dt\n g = diffusion * jnp.sqrt(dt)\n return f, g\n\n def reverse(self):\n \"\"\"Creates a reverse-time version of the current SDE.\n\n This method defines a nested class for the reverse-time SDE and returns an instance of it.\n\n Returns:\n ReverseSDE: An instance of the reverse-time SDE subclass.\n \"\"\"\n num_time_steps = self.N\n end_t = self.T\n sde_fn = self.sde\n discretize_fn = self.discretize\n score_fn = self.score_fn\n sde_config = self.sde_config\n\n class ReverseSDE(self.__class__, abc.ABC):\n \"\"\"Reverse Stochastic Differential Equation abstract base class.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the ReverseSDE class.\n\n Inherits the properties from the original SDE class and overrides the relevant methods for the\n reverse-time SDE.\n \"\"\"\n super().__init__(sde_config)\n self.N = num_time_steps\n self.T = end_t\n self.score_fn = score_fn\n\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Return the drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the reverse-time SDE.\n \"\"\"\n drift, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = -drift + batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n # Set the diffusion function to zero for ODEs.\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the reverse-time SDE in the form of an iterative update rule.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the discretized reverse-time SDE.\n \"\"\"\n f, g = discretize_fn(y_corrupted, t, y_corrupted)\n rev_f = -f + batch_mul(\n g**2,\n self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n * (0.5 if self.sde_config.probability_flow else 1.0),\n )\n rev_g = jnp.zeros_like(g) if self.sde_config.probability_flow else g\n return rev_f, rev_g\n\n def semi_analytic(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Computes the semi-analytic drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the semi-analytic reverse-time SDE.\n \"\"\"\n _, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n return ReverseSDE()" }, { "identifier": "filter_mask", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def filter_mask(shape, radius):\n device_num, batch_size, rows, cols, n_channels = shape\n crow, ccol = int(rows / 2), int(cols / 2)\n center = [crow, ccol]\n x, y = jnp.ogrid[:rows, :cols]\n mask_area = (x - center[0]) ** 2 + (y - center[1]) ** 2 >= radius * radius\n mask = jnp.ones_like(mask_area)\n mask = jnp.where(mask_area, 0, mask)\n mask = mask.reshape(1, 1, rows, cols, 1)\n mask = jnp.repeat(mask, device_num, axis=0)\n mask = jnp.repeat(mask, batch_size, axis=1)\n mask = jnp.repeat(mask, n_channels, axis=4)\n return mask" }, { "identifier": "make_grid_image", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def make_grid_image(ndarray: Any, inverse_scaler: Callable, padding: int = 2, pad_value: float = 0.0) -> Any:\n \"\"\"Make a grid image from a Numpy Array.\n\n Args:\n ndarray: The Numpy Array.\n inverse_scaler: The inverse scaler.\n padding: The padding.\n pad_value: The padding value.\n\n Returns:\n The grid image.\n \"\"\"\n ndarray = jnp.asarray(ndarray)\n\n if ndarray.ndim == 4 and ndarray.shape[-1] == 1: # single-channel images\n ndarray = jnp.concatenate((ndarray, ndarray, ndarray), -1)\n\n n_row = int(np.sqrt(ndarray.shape[0]))\n # make the mini-batch of images into a grid\n n_maps = ndarray.shape[0]\n x_maps = min(n_row, n_maps)\n ymaps = int(math.ceil(float(n_maps) / x_maps))\n height, width = int(ndarray.shape[1] + padding), int(ndarray.shape[2] + padding)\n num_channels = ndarray.shape[3]\n grid = np.full((height * ymaps + padding, width * x_maps + padding, num_channels), pad_value).astype(np.float32)\n k = 0\n for y in range(ymaps):\n for x in range(x_maps):\n if k >= n_maps:\n break\n grid[\n y * height + padding : (y + 1) * height,\n x * width + padding : (x + 1) * width,\n ] = ndarray[k]\n k = k + 1\n\n ndarr = inverse_scaler(grid)\n ndarr = jnp.clip(ndarr * 255, 0, 255).astype(jnp.uint8)\n return ndarr" }, { "identifier": "process_images", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def process_images(images: Any) -> Any:\n \"\"\"Reshape images to the correct shape.\n\n Args:\n images: Tensor of images to reshape.\n\n Returns:\n A tensor of images with the correct shape.\n \"\"\"\n w = np.sqrt(images.shape[2]).astype(int)\n h = np.sqrt(images.shape[2]).astype(int)\n o = images.shape[3]\n return images.reshape(-1, w, h, o)" }, { "identifier": "save_samples", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def save_samples(round_num: int, samples: Any, file_path: str) -> None:\n \"\"\"Save samples to a file.\n\n Args:\n round_num: The round number of the evaluation.\n samples: Tensor of samples to save.\n file_path: string of the Path to the file where the samples will be saved.\n \"\"\"\n for i in range(samples.shape[0]):\n clean_path = os.path.join(file_path, f\"clean/samples_{round_num}_{i}.npy\")\n np.save(clean_path, samples[i])\n samples_path = os.path.join(file_path, 
f\"samples_{round_num}.npz\")\n with tf.io.gfile.GFile(samples_path, \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, samples=samples)\n f_out.write(io_buffer.getvalue())" }, { "identifier": "to_grayscale", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "@jax.pmap\ndef to_grayscale(images):\n weights = np.array([0.2989, 0.5870, 0.1140])[None, None, None, :] # Extend dimensions\n grayscale_images = np.sum(images * weights, axis=-1)\n return grayscale_images" }, { "identifier": "get_data_inverse_scaler", "path": "src/functional_diffusion_processes/utils/scaler.py", "snippet": "def get_data_inverse_scaler(is_centered: bool) -> Callable:\n \"\"\"Inverse data normalizer.\n\n Rescale data to original range at the end of the diffusion.\n\n Args:\n is_centered: boolean if True data will rescaled from [-1, 1] to [0, 1].\n \"\"\"\n if is_centered:\n # Rescale [-1, 1] to [0, 1]\n return lambda x: (x + 1.0) / 2.0\n else:\n return lambda x: x" }, { "identifier": "get_data_scaler", "path": "src/functional_diffusion_processes/utils/scaler.py", "snippet": "def get_data_scaler(is_centered: bool) -> Callable:\n \"\"\"Normalize data. Assume data are always in [0, 1].\n\n Args:\n is_centered: boolean if True data will be centered in [-1, 1].\n \"\"\"\n if is_centered:\n # Rescale to [-1, 1]\n return lambda x: x * 2.0 - 1.0\n else:\n return lambda x: x" }, { "identifier": "TrainState", "path": "src/functional_diffusion_processes/utils/training_state.py", "snippet": "class TrainState(train_state.TrainState):\n \"\"\"The training state for the model.\"\"\"\n\n opt_state_params: Any\n ema_params: Any\n rng: jax.random.PRNGKey" }, { "identifier": "colorizing_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def colorizing_fn(\n sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray, gray_scale_img: jnp.ndarray\n) -> Tuple:\n \"\"\"Perform colorizing task on a given grayscale image.\n\n Args:\n sample_fn (Callable): The sampling function used for colorization.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for colorization.\n gray_scale_img (jnp.ndarray): The grayscale image to be colorized.\n\n Returns:\n Tuple: The updated state and the colorized image.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params, gray_scale_img)" }, { "identifier": "construct_sampling_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def construct_sampling_fn(model: flax.linen.Module, sampler: Sampler) -> Callable:\n \"\"\"Construct a sampling function for generating samples from the model.\n\n Args:\n model (flax.linen.Module): The model instance from which to generate samples.\n sampler (Sampler): The sampler instance used for sampling.\n\n Returns:\n Callable: The constructed sampling function.\n \"\"\"\n predict_fn = model.make_predict_fn()\n if isinstance(model, BaseMAML):\n super_resolution_fn = model.make_super_resolution_fn()\n sample_fn = sampler.make_sampler(predict_fn, super_resolution_fn)\n else:\n sample_fn = sampler.make_sampler(predict_fn, None)\n return sample_fn" }, { "identifier": "construct_train_step", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def construct_train_step(optimizer, loss_fn) -> Callable:\n \"\"\"Construct a train step function to be used in the training loop.\n\n This function creates a training step function which, when called, performs\n a single 
step of training including forward pass, loss computation, and\n backward pass for gradient computation and updates.\n\n Args:\n optimizer: The optimizer instance used for updating model parameters.\n loss_fn: The loss function used for computing the loss.\n\n Returns:\n Callable: The constructed train step function.\n \"\"\"\n\n @partial(jax.pmap, axis_name=\"device\")\n def train_fn(\n rng,\n params,\n optim_params,\n step,\n batch_input,\n batch,\n ):\n grad_params, (new_rng, loss, loss_inner, batch_reconstructed, batch_corrupted, target) = loss_fn(\n rng, params, step, batch_input, batch\n )\n\n loss = jax.lax.pmean(loss, axis_name=\"device\")\n grad_params = jax.lax.pmean(grad_params, axis_name=\"device\")\n\n updates, optim_params = optimizer.update(grad_params, optim_params, params)\n\n params = optax.apply_updates(params, updates)\n params = clip_learning_rates(params)\n return new_rng, loss, loss_inner, params, optim_params, batch_reconstructed, batch_corrupted, target\n\n return train_fn" }, { "identifier": "inpainting_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def inpainting_fn(\n sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray, image: jnp.ndarray, mask: jnp.ndarray\n) -> Tuple:\n \"\"\"Perform inpainting task on a given image using a mask.\n\n Args:\n sample_fn (Callable): The sampling function used for inpainting.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for inpainting.\n image (jnp.ndarray): The image to be inpainted.\n mask (jnp.ndarray): The mask used for inpainting.\n\n Returns:\n Tuple: The updated state and the inpainted image.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params, image, mask)" }, { "identifier": "sampling_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def sampling_fn(sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray) -> Tuple:\n \"\"\"Perform sampling task using a given sampling function.\n\n Args:\n sample_fn (Callable): The sampling function.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for sampling.\n\n Returns:\n Tuple: The updated state after performing the sampling.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params)" } ]
import abc
import gc
import io
import logging
import os
import flax
import flax.jax_utils as flax_utils
import hydra.utils
import jax
import numpy as np
import tensorflow as tf
import wandb
from typing import Any, Callable, Tuple, Union
from cleanfid import fid
from flax import linen, traverse_util
from flax.training import checkpoints
from flax.training.checkpoints import restore_checkpoint
from jax import numpy as jnp
from omegaconf import DictConfig, OmegaConf
from tqdm.auto import tqdm
from wandb.sdk.lib import RunDisabled
from wandb.sdk.wandb_run import Run
from ..datasets import AudioDataset, ImageDataset
from ..datasets.base_dataset import BaseDataset
from ..losses.base_loss import Loss
from ..metrics import FIDMetric
from ..samplers import Sampler
from ..sdetools.base_sde import SDE
from ..utils.common import filter_mask, make_grid_image, process_images, save_samples, to_grayscale
from ..utils.scaler import get_data_inverse_scaler, get_data_scaler
from ..utils.training_state import TrainState
from .helpers import colorizing_fn, construct_sampling_fn, construct_train_step, inpainting_fn, sampling_fn
12,261
# import imageio
# import imageio

pylogger = logging.getLogger(__name__)


class Trainer(abc.ABC):
    """Class for training a model."""

    def __init__(
        self,
        mode: str,
        model_name: str,
        training_config: DictConfig,
        optimizer,
        evaluation_config: DictConfig,
        trainer_logging: DictConfig,
# import imageio
# import imageio

pylogger = logging.getLogger(__name__)


class Trainer(abc.ABC):
    """Class for training a model."""

    def __init__(
        self,
        mode: str,
        model_name: str,
        training_config: DictConfig,
        optimizer,
        evaluation_config: DictConfig,
        trainer_logging: DictConfig,
sampler: Sampler,
5
2023-10-24 22:01:35+00:00
16k
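The SDE context snippet in the row above defines the forward discretization (f = drift * dt, g = diffusion * sqrt(dt) with dt = (T - eps) / N) and a reverse-time drift of -f + g**2 * score, halved when probability_flow is enabled. The toy sketch below is not part of the dataset row: it mirrors that update for a scalar Ornstein-Uhlenbeck-like process, with made-up drift, diffusion, and score placeholders, and with batch_mul from the original code replaced by plain elementwise multiplication.

import jax
import jax.numpy as jnp

T, eps, N = 1.0, 1e-3, 1000
dt = (T - eps) / N  # step size used by SDE.discretize

def toy_sde(y, t):
    # hypothetical forward SDE terms, not taken from the dataset row
    drift = -0.5 * y
    diffusion = jnp.ones_like(y)
    return drift, diffusion

def toy_score(y, t):
    # placeholder for the model-based score function
    return -y

def reverse_discretize(y, t, probability_flow=False):
    drift, diffusion = toy_sde(y, t)
    f, g = drift * dt, diffusion * jnp.sqrt(dt)          # mirrors SDE.discretize
    scale = 0.5 if probability_flow else 1.0
    rev_f = -f + (g ** 2) * toy_score(y, t) * scale      # mirrors ReverseSDE.discretize
    rev_g = jnp.zeros_like(g) if probability_flow else g
    return rev_f, rev_g

rng = jax.random.PRNGKey(0)
y = jax.random.normal(rng, (4,))
rev_f, rev_g = reverse_discretize(y, t=jnp.asarray(T))
y_next = y + rev_f + rev_g * jax.random.normal(rng, y.shape)  # one predictor-style step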
KosinskiLab/pyTME
tme/tests/test_structure.py
[ { "identifier": "Structure", "path": "tme/structure.py", "snippet": "class Structure:\n \"\"\"Represents atomic structures in accordance with the Protein Data Bank (PDB)\n format specification.\n\n Attributes\n ----------\n record_type : NDArray\n Type of the record, e.g., ATOM, HETATM. Array shape = (n,)\n atom_serial_number : NDArray\n Serial number assigned to each atom. Array shape = (n,)\n atom_name : NDArray\n Standardized names for each atom. Array shape = (n,)\n atom_coordinate : NDArray\n The 3D Cartesian coordinates of each atom in x, y, z. Array shape = (n,3 )\n alternate_location_indicator : NDArray\n Indicator for alternate locations of an atom if it exists in multiple places.\n Array shape = (n,)\n residue_name : NDArray\n Standard residue names where each atom belongs. Array shape = (n,)\n chain_identifier : NDArray\n Identifier for the chain where each atom is located. Array shape = (n,)\n residue_sequence_number : NDArray\n Sequence number of the residue in the protein chain for each atom.\n Array shape = (n,)\n code_for_residue_insertion : NDArray\n Code to denote any residue insertion. Array shape = (n,)\n occupancy : NDArray\n Occupancy factor of each atom, indicating the fraction of time the atom\n is located at its position. Array shape = (n,)\n temperature_factor : NDArray\n Measure of the atomic displacement or B-factor for each atom. Array shape = (n,)\n segment_identifier : NDArray\n Identifier for the segment where each atom belongs. Array shape = (n,)\n element_symbol : NDArray\n Atomic element symbol for each atom. Array shape = (n,)\n charge : NDArray\n Charge on the atom. Array shape = (n,)\n details : dict\n Any additional or auxiliary details. Array shape = (n,)\n\n References\n ----------\n .. [1] https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html\n \"\"\"\n\n #: Return a numpy array with record types, e.g. 
ATOM, HETATM.\n record_type: NDArray\n\n #: Return a numpy array with serial number of each atom.\n atom_serial_number: NDArray\n\n #: Return a numpy array with name of each atom.\n atom_name: NDArray\n\n #: Return a numpy array with coordinates of each atom in x, y, z.\n atom_coordinate: NDArray\n\n #: Return a numpy array with alternate location indicates of each atom.\n alternate_location_indicator: NDArray\n\n #: Return a numpy array with originating residue names of each atom.\n residue_name: NDArray\n\n #: Return a numpy array with originating structure chain of each atom.\n chain_identifier: NDArray\n\n #: Return a numpy array with originating residue id of each atom.\n residue_sequence_number: NDArray\n\n #: Return a numpy array with insertion information d of each atom.\n code_for_residue_insertion: NDArray\n\n #: Return a numpy array with occupancy factors of each atom.\n occupancy: NDArray\n\n #: Return a numpy array with B-factors for each atom.\n temperature_factor: NDArray\n\n #: Return a numpy array with segment identifier for each atom.\n segment_identifier: NDArray\n\n #: Return a numpy array with element symbols of each atom.\n element_symbol: NDArray\n\n #: Return a numpy array with charges of each atom.\n charge: NDArray\n\n #: Returns a dictionary with class instance metadata.\n details: dict\n\n def __post_init__(self, *args, **kwargs):\n \"\"\"\n Initialize the structure and populate header details.\n\n Raises\n ------\n ValueError\n If other NDArray attributes to not match the number of atoms.\n If the shape of atom_coordinates and chain_identifier doesn't match.\n \"\"\"\n self._elements = Elements()\n self.details = self._populate_details(self.details)\n\n n_atoms = self.atom_coordinate.shape[0]\n for attribute in self.__dict__:\n value = getattr(self, attribute)\n if type(value) != np.ndarray:\n continue\n if value.shape[0] != n_atoms:\n raise ValueError(\n f\"Expected shape of {attribute}: {n_atoms}, got {value.shape[0]}.\"\n )\n\n def __getitem__(self, indices: List[int]) -> \"Structure\":\n \"\"\"\n Get a Structure instance for specified indices.\n\n Parameters\n ----------\n indices : Union[int, bool, NDArray]\n The indices to get.\n\n Returns\n -------\n Structure\n The Structure instance for the given indices.\n \"\"\"\n if type(indices) in (int, bool):\n indices = (indices,)\n\n indices = np.asarray(indices)\n attributes = (\n \"record_type\",\n \"atom_serial_number\",\n \"atom_name\",\n \"atom_coordinate\",\n \"alternate_location_indicator\",\n \"residue_name\",\n \"chain_identifier\",\n \"residue_sequence_number\",\n \"code_for_residue_insertion\",\n \"occupancy\",\n \"temperature_factor\",\n \"segment_identifier\",\n \"element_symbol\",\n \"charge\",\n )\n kwargs = {attr: getattr(self, attr)[indices] for attr in attributes}\n ret = self.__class__(**kwargs, details={})\n return ret\n\n def __repr__(self):\n \"\"\"\n Return a string representation of the Structure.\n\n Returns\n -------\n str\n The string representation.\n \"\"\"\n unique_chains = \"-\".join(\n [\n \",\".join([str(x) for x in entity])\n for entity in self.details[\"unique_chains\"]\n ]\n )\n min_atom = np.min(self.atom_serial_number)\n max_atom = np.max(self.atom_serial_number)\n n_atom = self.atom_serial_number.size\n\n min_residue = np.min(self.residue_sequence_number)\n max_residue = np.max(self.residue_sequence_number)\n n_residue = self.residue_sequence_number.size\n\n repr_str = (\n f\"Structure object at {id(self)}\\n\"\n f\"Unique Chains: {unique_chains}, \"\n f\"Atom Range: 
{min_atom}-{max_atom} [N = {n_atom}], \"\n f\"Residue Range: {min_residue}-{max_residue} [N = {n_residue}]\"\n )\n return repr_str\n\n def get_chains(self) -> List[str]:\n \"\"\"\n Returns a list of available chains.\n\n Returns\n -------\n list\n The list of available chains.\n \"\"\"\n return list(self.details[\"chain_weight\"].keys())\n\n def copy(self) -> \"Structure\":\n \"\"\"\n Returns a copy of the Structure instance.\n\n Returns\n -------\n Structure\n The copied Structure instance.\n \"\"\"\n return deepcopy(self)\n\n def _populate_details(self, details: Dict = {}) -> Dict:\n \"\"\"\n Populate the details dictionary with the data from the Structure instance.\n\n Parameters\n ----------\n details : dict, optional\n The initial details dictionary, by default {}.\n\n Returns\n -------\n dict\n The populated details dictionary.\n \"\"\"\n details[\"weight\"] = np.sum(\n [self._elements[atype].atomic_weight for atype in self.element_symbol]\n )\n\n label, idx, chain = np.unique(\n self.chain_identifier, return_inverse=True, return_index=True\n )\n chain_weight = np.bincount(\n chain,\n [self._elements[atype].atomic_weight for atype in self.element_symbol],\n )\n labels = self.chain_identifier[idx]\n details[\"chain_weight\"] = {key: val for key, val in zip(labels, chain_weight)}\n\n # Group non-unique chains in separate lists in details[\"unique_chains\"]\n details[\"unique_chains\"], temp = [], {}\n for chain_label in label:\n index = len(details[\"unique_chains\"])\n chain_sequence = \"\".join(\n [\n str(y)\n for y in self.element_symbol[\n np.where(self.chain_identifier == chain_label)\n ]\n ]\n )\n if chain_sequence not in temp:\n temp[chain_sequence] = index\n details[\"unique_chains\"].append([chain_label])\n continue\n idx = temp.get(chain_sequence)\n details[\"unique_chains\"][idx].append(chain_label)\n\n filtered_data = [\n (label, integer)\n for label, integer in zip(\n self.chain_identifier, self.residue_sequence_number\n )\n ]\n filtered_data = sorted(filtered_data, key=lambda x: x[0])\n details[\"chain_range\"] = {}\n for label, values in groupby(filtered_data, key=lambda x: x[0]):\n values = [int(x[1]) for x in values]\n details[\"chain_range\"][label] = (min(values), max(values))\n\n return details\n\n @classmethod\n def from_file(\n cls,\n filename: str,\n keep_non_atom_records: bool = False,\n filter_by_elements: set = None,\n filter_by_residues: set = None,\n ) -> \"Structure\":\n \"\"\"\n Reads in an mmcif or pdb file and converts it into class instance.\n\n Parameters\n ----------\n filename : str\n Path to the mmcif or pdb file.\n keep_non_atom_records : bool, optional\n Wheter to keep residues that are not labelled ATOM.\n filter_by_elements: set, optional\n Which elements to keep. Default corresponds to all elements.\n filter_by_residues: set, optional\n Which residues to keep. 
Default corresponds to all residues.\n\n Raises\n ------\n ValueError\n If the extension is not '.pdb' or '.cif'.\n\n Returns\n -------\n Structure\n Read in structure file.\n \"\"\"\n _, file_extension = splitext(basename(filename.upper()))\n if file_extension == \".PDB\":\n func = cls._load_pdb\n elif file_extension == \".CIF\":\n func = cls._load_mmcif\n else:\n raise NotImplementedError(\n \"Could not determine structure filetype from extension.\"\n \" Supported filetypes are mmcif (.cif) and pdb (.pdb).\"\n )\n data = func(filename)\n\n keep = np.ones(data[\"element_symbol\"].size, dtype=bool)\n if filter_by_elements:\n keep = np.logical_and(\n keep,\n np.in1d(data[\"element_symbol\"], np.array(list(filter_by_elements))),\n )\n if filter_by_residues:\n keep = np.logical_and(\n keep, np.in1d(data[\"residue_name\"], np.array(list(filter_by_residues)))\n )\n if not keep_non_atom_records:\n keep = np.logical_and(keep, data[\"record_type\"] == \"ATOM\")\n\n for key in data:\n if key == \"details\":\n continue\n if type(data[key]) == np.ndarray:\n data[key] = data[key][keep]\n else:\n data[key] = [x for x, flag in zip(data[key], keep) if flag]\n\n data[\"details\"][\"filepath\"] = filename\n\n return cls(**data)\n\n @staticmethod\n def _load_mmcif(filename: str) -> Dict:\n \"\"\"\n Parses a macromolecular Crystallographic Information File (mmCIF)\n and returns the data in a dictionary format.\n\n Parameters\n ----------\n filename : str\n The filename of the mmCIF to load.\n\n Returns\n -------\n dict\n A dictionary of numpy arrays. Keys are the names of the PDB\n coordinate section. In addition, some details about the parsed\n structure are included. In case of conversion failure, the failing\n attribute is set to 0 if its supposed to be an integer value.\n \"\"\"\n result = MMCIFParser(filename)\n\n atom_site_mapping = {\n \"record_type\": (\"group_PDB\", str),\n \"atom_serial_number\": (\"id\", int),\n \"atom_name\": (\"label_atom_id\", str),\n \"alternate_location_indicator\": (\"label_alt_id\", str),\n \"residue_name\": (\"label_comp_id\", str),\n # \"chain_identifier\": (\"auth_asym_id\", str),\n \"chain_identifier\": (\"label_asym_id\", str),\n \"residue_sequence_number\": (\"label_seq_id\", int),\n \"code_for_residue_insertion\": (\"pdbx_PDB_ins_code\", str),\n \"occupancy\": (\"occupancy\", float),\n \"temperature_factor\": (\"B_iso_or_equiv\", float),\n \"segment_identifier\": (\"pdbx_PDB_model_num\", str),\n \"element_symbol\": (\"type_symbol\", str),\n \"charge\": (\"pdbx_formal_charge\", str),\n }\n\n out = {}\n for out_key, (atom_site_key, dtype) in atom_site_mapping.items():\n out_data = [\n x.strip() for x in result[\"atom_site\"].get(atom_site_key, [\".\"])\n ]\n if dtype == int:\n out_data = [0 if x == \".\" else int(x) for x in out_data]\n try:\n out[out_key] = np.asarray(out_data).astype(dtype)\n except ValueError:\n default = [\".\"] if dtype == str else 0\n print(f\"Converting {out_key} to {dtype} failed, set to {default}.\")\n out[out_key] = np.repeat(default, len(out_data)).astype(dtype)\n\n number_entries = len(max(out.values(), key=len))\n for key, value in out.items():\n if value.size != 1:\n continue\n out[key] = np.repeat(value, number_entries // value.size)\n\n out[\"details\"] = {}\n out[\"atom_coordinate\"] = np.transpose(\n np.array(\n [\n result[\"atom_site\"][\"Cartn_x\"],\n result[\"atom_site\"][\"Cartn_y\"],\n result[\"atom_site\"][\"Cartn_z\"],\n ],\n dtype=np.float32,\n )\n )\n\n detail_mapping = {\n \"resolution\": (\"em_3d_reconstruction\", 
\"resolution\", np.nan),\n \"resolution_method\": (\"em_3d_reconstruction\", \"resolution_method\", np.nan),\n \"method\": (\"exptl\", \"method\", np.nan),\n \"electron_source\": (\"em_imaging\", \"electron_source\", np.nan),\n \"illumination_mode\": (\"em_imaging\", \"illumination_mode\", np.nan),\n \"microscope_model\": (\"em_imaging\", \"microscope_model\", np.nan),\n }\n for out_key, (base_key, inner_key, default) in detail_mapping.items():\n if base_key not in result:\n continue\n out[\"details\"][out_key] = result[base_key].get(inner_key, default)\n\n return out\n\n @staticmethod\n def _load_pdb(filename: str) -> Dict:\n \"\"\"\n Parses a Protein Data Bank (PDB) file and returns the data\n in a dictionary format.\n\n Parameters\n ----------\n filename : str\n The filename of the PDB file to load.\n\n Returns\n -------\n dict\n A dictionary of numpy arrays. Keys are the names of the PDB\n coordinate section. In addition, some details about the parsed\n structure are included. In case of conversion failure, the failing\n attribute is set to 0 if its supposed to be an integer value.\n \"\"\"\n result = PDBParser(filename)\n\n atom_site_mapping = {\n \"record_type\": (\"record_type\", str),\n \"atom_serial_number\": (\"atom_serial_number\", int),\n \"atom_name\": (\"atom_name\", str),\n \"alternate_location_indicator\": (\"alternate_location_indicator\", str),\n \"residue_name\": (\"residue_name\", str),\n \"chain_identifier\": (\"chain_identifier\", str),\n \"residue_sequence_number\": (\"residue_sequence_number\", int),\n \"code_for_residue_insertion\": (\"code_for_residue_insertion\", str),\n \"occupancy\": (\"occupancy\", float),\n \"temperature_factor\": (\"temperature_factor\", float),\n \"segment_identifier\": (\"segment_identifier\", str),\n \"element_symbol\": (\"element_symbol\", str),\n \"charge\": (\"charge\", str),\n }\n\n out = {\"details\": result[\"details\"]}\n for out_key, (inner_key, dtype) in atom_site_mapping.items():\n out_data = [x.strip() for x in result[inner_key]]\n if dtype == int:\n out_data = [0 if x == \".\" else int(x) for x in out_data]\n try:\n out[out_key] = np.asarray(out_data).astype(dtype)\n except ValueError:\n default = \".\" if dtype == str else 0\n print(\n f\"Converting {out_key} to {dtype} failed. Setting {out_key} to {default}.\"\n )\n out[out_key] = np.repeat(default, len(out_data)).astype(dtype)\n\n out[\"atom_coordinate\"] = np.array(result[\"atom_coordinate\"], dtype=np.float32)\n\n return out\n\n def to_file(self, filename: str) -> None:\n \"\"\"\n Writes the Structure instance data to a Protein Data Bank (PDB) or\n macromolecular Crystallographic Information File (mmCIF) file depending\n one whether filename ends with '.pdb' or '.cif'.\n\n Raises\n ------\n ValueError\n If the extension is not '.pdb' or '.cif'.\n\n Parameters\n ----------\n filename : str\n The filename of the file to write.\n \"\"\"\n data_out = []\n if np.any(np.vectorize(len)(self.chain_identifier) > 2):\n warnings.warn(\"Chain identifiers longer than one will be shortened.\")\n\n _, file_extension = splitext(basename(filename.upper()))\n if file_extension == \".PDB\":\n func = self._write_pdb\n elif file_extension == \".CIF\":\n func = self._write_mmcif\n else:\n raise NotImplementedError(\n \"Could not determine structure filetype.\"\n \" Supported filetypes are mmcif (.cif) and pdb (.pdb).\"\n )\n\n if self.atom_coordinate.shape[0] > 10**5 and func == self._write_pdb:\n warnings.warn(\n \"The structure contains more than 100,000 atoms. 
Consider using mmcif.\"\n )\n\n with open(filename, mode=\"w\", encoding=\"utf-8\") as ofile:\n ofile.writelines(func())\n\n def _write_pdb(self) -> List[str]:\n \"\"\"\n Returns a PDB string representation of the structure instance.\n\n Returns\n -------\n list\n List containing PDB file coordine lines.\n \"\"\"\n data_out = []\n for index in range(self.atom_coordinate.shape[0]):\n x, y, z = self.atom_coordinate[index, :]\n line = list(\" \" * 80)\n line[0:6] = f\"{self.record_type[index]:<6}\"\n line[6:11] = f\"{self.atom_serial_number[index]:>5}\"\n line[12:16] = f\"{self.atom_name[index]:<4}\"\n line[16] = f\"{self.alternate_location_indicator[index]:<1}\"\n line[17:20] = f\"{self.residue_name[index]:<3}\"\n line[21] = f\"{self.chain_identifier[index][0]:<1}\"\n line[22:26] = f\"{self.residue_sequence_number[index]:>4}\"\n line[26] = f\"{self.code_for_residue_insertion[index]:<1}\"\n line[30:38] = f\"{x:>8.3f}\"\n line[38:46] = f\"{y:>8.3f}\"\n line[46:54] = f\"{z:>8.3f}\"\n line[54:60] = f\"{self.occupancy[index]:>6.2f}\"\n line[60:66] = f\"{self.temperature_factor[index]:>6.2f}\"\n line[72:76] = f\"{self.segment_identifier[index]:>4}\"\n line[76:78] = f\"{self.element_symbol[index]:<2}\"\n line[78:80] = f\"{self.charge[index]:>2}\"\n data_out.append(\"\".join(line))\n data_out.append(\"END\")\n data_out = \"\\n\".join(data_out)\n return data_out\n\n def _write_mmcif(self) -> List[str]:\n \"\"\"\n Returns a MMCIF string representation of the structure instance.\n\n Returns\n -------\n list\n List containing MMCIF file coordinate lines.\n \"\"\"\n model_num, entity_id = 1, 1\n data = {\n \"group_PDB\": [],\n \"id\": [],\n \"type_symbol\": [],\n \"label_atom_id\": [],\n \"label_alt_id\": [],\n \"label_comp_id\": [],\n \"label_asym_id\": [],\n \"label_entity_id\": [],\n \"label_seq_id\": [],\n \"pdbx_PDB_ins_code\": [],\n \"Cartn_x\": [],\n \"Cartn_y\": [],\n \"Cartn_z\": [],\n \"occupancy\": [],\n \"B_iso_or_equiv\": [],\n \"pdbx_formal_charge\": [],\n \"auth_seq_id\": [],\n \"auth_comp_id\": [],\n \"auth_asym_id\": [],\n \"auth_atom_id\": [],\n \"pdbx_PDB_model_num\": [],\n }\n\n for index in range(self.atom_coordinate.shape[0]):\n x, y, z = self.atom_coordinate[index, :]\n data[\"group_PDB\"].append(self.record_type[index])\n data[\"id\"].append(str(self.atom_serial_number[index]))\n data[\"type_symbol\"].append(self.element_symbol[index])\n data[\"label_atom_id\"].append(self.atom_name[index])\n data[\"label_alt_id\"].append(self.alternate_location_indicator[index])\n data[\"label_comp_id\"].append(self.residue_name[index])\n data[\"label_asym_id\"].append(self.chain_identifier[index][0])\n data[\"label_entity_id\"].append(str(entity_id))\n data[\"label_seq_id\"].append(str(self.residue_sequence_number[index]))\n data[\"pdbx_PDB_ins_code\"].append(self.code_for_residue_insertion[index])\n data[\"Cartn_x\"].append(f\"{x:.3f}\")\n data[\"Cartn_y\"].append(f\"{y:.3f}\")\n data[\"Cartn_z\"].append(f\"{z:.3f}\")\n data[\"occupancy\"].append(f\"{self.occupancy[index]:.2f}\")\n data[\"B_iso_or_equiv\"].append(f\"{self.temperature_factor[index]:.2f}\")\n data[\"pdbx_formal_charge\"].append(self.charge[index])\n data[\"auth_seq_id\"].append(str(self.residue_sequence_number[index]))\n data[\"auth_comp_id\"].append(self.residue_name[index])\n data[\"auth_asym_id\"].append(self.chain_identifier[index][0])\n data[\"auth_atom_id\"].append(self.atom_name[index])\n data[\"pdbx_PDB_model_num\"].append(str(model_num))\n\n output_data = {\"atom_site\": data}\n original_file = 
self.details.get(\"filepath\", \"\")\n try:\n new_data = {k: v for k, v in MMCIFParser(original_file).items()}\n index = self.atom_serial_number - 1\n new_data[\"atom_site\"] = {\n k: [v[i] for i in index] for k, v in new_data[\"atom_site\"].items()\n }\n new_data[\"atom_site\"][\"Cartn_x\"] = data[\"Cartn_x\"]\n new_data[\"atom_site\"][\"Cartn_y\"] = data[\"Cartn_y\"]\n new_data[\"atom_site\"][\"Cartn_z\"] = data[\"Cartn_z\"]\n output_data = new_data\n except Exception:\n pass\n\n ret = \"\"\n for category, subdict in output_data.items():\n ret += \"#\\n\"\n is_loop = isinstance(subdict[list(subdict.keys())[0]], list)\n if not is_loop:\n for k in subdict:\n ret += f\"_{category}.{k}\\t{subdict[k]}\\n\"\n else:\n ret += \"loop_\\n\"\n ret += \"\".join([f\"_{category}.{k}\\n\" for k in subdict])\n padded_subdict = _format_mmcif_colunns(subdict)\n\n data = [\n \"\".join([str(x) for x in content])\n for content in zip(*padded_subdict.values())\n ]\n ret += \"\\n\".join([entry for entry in data]) + \"\\n\"\n\n return ret\n\n def subset_by_chain(self, chain: str = None) -> \"Structure\":\n \"\"\"\n Return a subset of the structure that contains only atoms belonging to\n a specific chain. If no chain is specified, all chains are returned.\n\n Parameters\n ----------\n chain : str, optional\n The chain identifier. If multiple chains should be selected they need\n to be a comma separated string, e.g. 'A,B,CE'. If chain None,\n all chains are returned. Default is None.\n\n Returns\n -------\n Structure\n A subset of the original structure containing only the specified chain.\n \"\"\"\n chain = np.unique(self.chain_identifier) if chain is None else chain.split(\",\")\n keep = np.in1d(self.chain_identifier, chain)\n return self[keep]\n\n def subset_by_range(\n self,\n start: int,\n stop: int,\n chain: str = None,\n ) -> \"Structure\":\n \"\"\"\n Return a subset of the structure within a specific range of residues.\n\n Parameters\n ----------\n start : int\n The starting residue sequence number.\n\n stop : int\n The ending residue sequence number.\n\n chain : str, optional\n The chain identifier. If multiple chains should be selected they need\n to be a comma separated string, e.g. 'A,B,CE'. If chain None,\n all chains are returned. 
Default is None.\n\n Returns\n -------\n Structure\n A subset of the original structure within the specified residue range.\n \"\"\"\n ret = self.subset_by_chain(chain=chain)\n keep = np.logical_and(\n ret.residue_sequence_number >= start, ret.residue_sequence_number <= stop\n )\n return ret[keep]\n\n def center_of_mass(self) -> NDArray:\n \"\"\"\n Calculate the center of mass of the structure.\n\n Returns\n -------\n NDArray\n The center of mass of the structure.\n \"\"\"\n weights = [self._elements[atype].atomic_weight for atype in self.element_symbol]\n return np.dot(self.atom_coordinate.T, weights) / np.sum(weights)\n\n def rigid_transform(\n self,\n rotation_matrix: NDArray,\n translation: NDArray,\n use_geometric_center: bool = False,\n ) -> \"Structure\":\n \"\"\"\n Performs a rigid transform of internal structure coordinates.\n\n Parameters\n ----------\n rotation_matrix : NDArray\n The rotation matrix to apply to the coordinates.\n translation : NDArray\n The vector to translate the coordinates by.\n use_geometric_center : bool, optional\n Whether to use geometric or coordinate center.\n\n Returns\n -------\n Structure\n The transformed instance of :py:class:`tme.structure.Structure`.\n \"\"\"\n out = np.empty_like(self.atom_coordinate.T)\n rigid_transform(\n coordinates=self.atom_coordinate.T,\n rotation_matrix=rotation_matrix,\n translation=translation,\n out=out,\n use_geometric_center=use_geometric_center,\n )\n ret = self.copy()\n ret.atom_coordinate = out.T.copy()\n return ret\n\n def centered(self) -> Tuple[\"Structure\", NDArray]:\n \"\"\"\n Shifts the structure analogous to :py:meth:`tme.density.Density.centered`.\n\n Returns\n -------\n Structure\n A copy of the class instance whose data center of mass is in the\n center of the data array.\n NDArray\n The coordinate translation.\n\n See Also\n --------\n :py:meth:`tme.Density.centered`\n \"\"\"\n center_of_mass = self.center_of_mass()\n enclosing_box = minimum_enclosing_box(coordinates=self.atom_coordinate.T)\n shift = np.subtract(np.divide(enclosing_box, 2), center_of_mass)\n\n transformed_structure = self.rigid_transform(\n translation=shift, rotation_matrix=np.eye(shift.size)\n )\n\n return transformed_structure, shift\n\n def _coordinate_to_position(\n self,\n shape: Tuple[int],\n sampling_rate: Tuple[float],\n origin: Tuple[float],\n ) -> (NDArray, Tuple[str], Tuple[int], float, Tuple[float]):\n \"\"\"\n Converts coordinates to positions.\n\n Parameters\n ----------\n shape : Tuple[int,]\n The desired shape of the output array.\n\n sampling_rate : float\n The sampling rate of the output array in unit of self.atom_coordinate.\n\n origin : Tuple[float,]\n The origin of the coordinate system.\n Returns\n -------\n Tuple[NDArray, List[str], Tuple[int, ], float, Tuple[float,]]\n Returns positions, atom_types, shape, sampling_rate, and origin.\n \"\"\"\n coordinates = self.atom_coordinate.copy()\n atom_types = self.element_symbol.copy()\n\n # positions are in x, y, z map is z, y, x\n coordinates = coordinates[:, ::-1]\n\n sampling_rate = 1 if sampling_rate is None else sampling_rate\n adjust_origin = origin is not None and shape is None\n origin = coordinates.min(axis=0) if origin is None else origin\n positions = (coordinates - origin) / sampling_rate\n positions = np.rint(positions).astype(int)\n\n if adjust_origin:\n left_shift = positions.min(axis=0)\n positions -= left_shift\n shape = positions.max(axis=0) + 1\n origin = origin + np.multiply(left_shift, sampling_rate)\n\n if shape is None:\n shape = 
positions.max(axis=0) + 1\n\n valid_positions = np.sum(\n np.logical_and(positions < shape, positions >= 0), axis=1\n )\n\n positions = positions[valid_positions == positions.shape[1], :]\n atom_types = atom_types[valid_positions == positions.shape[1]]\n\n self.details[\"nAtoms_outOfBound\"] = 0\n if positions.shape[0] != coordinates.shape[0]:\n out_of_bounds = coordinates.shape[0] - positions.shape[0]\n print(f\"{out_of_bounds}/{coordinates.shape[0]} atoms were out of bounds.\")\n self.details[\"nAtoms_outOfBound\"] = out_of_bounds\n\n return positions, atom_types, shape, sampling_rate, origin\n\n def _position_to_vdw_sphere(\n self,\n positions: Tuple[float],\n atoms: Tuple[str],\n sampling_rate: Tuple[float],\n volume: NDArray,\n ) -> None:\n \"\"\"\n Updates a volume with van der Waals spheres.\n\n Parameters\n ----------\n positions : Tuple[float, float, float]\n The positions of the atoms.\n\n atoms : Tuple[str]\n The types of the atoms.\n\n sampling_rate : float\n The desired sampling rate in unit of self.atom_coordinate of the\n output array.\n\n volume : NDArray\n The volume to update.\n \"\"\"\n index_dict, vdw_rad, shape = {}, {}, volume.shape\n for atom_index, atom_position in enumerate(positions):\n atom_type = atoms[atom_index]\n if atom_type not in index_dict.keys():\n atom_vdwr = np.ceil(\n np.divide(self._elements[atom_type].vdwr, (sampling_rate * 100))\n ).astype(int)\n\n vdw_rad[atom_type] = atom_vdwr\n atom_slice = tuple(slice(-k, k + 1) for k in atom_vdwr)\n distances = np.linalg.norm(\n np.divide(\n np.mgrid[atom_slice],\n atom_vdwr.reshape((-1,) + (1,) * volume.ndim),\n ),\n axis=0,\n )\n index_dict[atom_type] = (distances <= 1).astype(volume.dtype)\n\n footprint = index_dict[atom_type]\n start = np.maximum(np.subtract(atom_position, vdw_rad[atom_type]), 0)\n stop = np.minimum(np.add(atom_position, vdw_rad[atom_type]) + 1, shape)\n volume_slice = tuple(slice(*coord) for coord in zip(start, stop))\n\n start_index = np.maximum(-np.subtract(atom_position, vdw_rad[atom_type]), 0)\n stop_index = np.add(\n footprint.shape,\n np.minimum(\n np.subtract(shape, np.add(atom_position, vdw_rad[atom_type]) + 1), 0\n ),\n )\n index_slice = tuple(slice(*coord) for coord in zip(start_index, stop_index))\n volume[volume_slice] += footprint[index_slice]\n\n def _position_to_scattering_factors(\n self,\n positions: NDArray,\n atoms: NDArray,\n sampling_rate: NDArray,\n volume: NDArray,\n lowpass_filter: bool = True,\n downsampling_factor: float = 1.35,\n source: str = \"peng1995\",\n ) -> None:\n \"\"\"\n Updates a volume with scattering factors.\n\n Parameters\n ----------\n positions : NDArray\n The positions of the atoms.\n atoms : NDArray\n Element symbols.\n sampling_rate : float\n Sampling rate that was used to convert coordinates to positions.\n volume : NDArray\n The volume to update.\n lowpass_filter : NDArray\n Whether the scattering factors hsould be lowpass filtered.\n downsampling_factor : NDArray\n Downsampling factor for scattering factor computation.\n source : str\n Which scattering factors to use\n\n Reference\n ---------\n https://github.com/I2PC/xmipp.\n \"\"\"\n scattering_profiles, shape = dict(), volume.shape\n for atom_index, point in enumerate(positions):\n if atoms[atom_index] not in scattering_profiles:\n spline = atom_profile(\n atom=atoms[atom_index],\n M=downsampling_factor,\n method=source,\n lfilter=lowpass_filter,\n )\n scattering_profiles.update({atoms[atom_index]: spline})\n\n atomic_radius = np.divide(\n self._elements[atoms[atom_index]].vdwr, 
sampling_rate * 100\n )\n starts = np.maximum(np.ceil(point - atomic_radius), 0).astype(int)\n stops = np.minimum(np.floor(point + atomic_radius), shape).astype(int)\n\n grid_index = np.meshgrid(\n *[range(start, stop) for start, stop in zip(starts, stops)]\n )\n distances = np.einsum(\n \"aijk->ijk\",\n np.array([(grid_index[i] - point[i]) ** 2 for i in range(len(point))]),\n dtype=np.float64,\n )\n distances = np.sqrt(distances)\n if not len(distances):\n grid_index, distances = point, 0\n np.add.at(\n volume,\n tuple(grid_index),\n scattering_profiles[atoms[atom_index]](distances),\n )\n\n def _get_atom_weights(\n self, atoms: Tuple[str] = None, weight_type: str = \"atomic_weight\"\n ) -> Tuple[float]:\n \"\"\"\n Returns weights of individual atoms according to a specified weight type.\n\n Parameters\n ----------\n atoms : Tuple of strings, optional\n The atoms to get the weights for. If None, weights for all atoms\n are used. Default is None.\n\n weight_type : str, optional\n The type of weights to return. This can either be 'atomic_weight',\n 'atomic_number', or 'van_der_waals_radius'. Default is 'atomic_weight'.\n\n Returns\n -------\n List[float]\n A list containing the weights of the atoms.\n \"\"\"\n atoms = self.element_symbol if atoms is None else atoms\n match weight_type:\n case \"atomic_weight\":\n weight = [self._elements[atom].atomic_weight for atom in atoms]\n case \"atomic_number\":\n weight = [self._elements[atom].atomic_number for atom in atoms]\n case _:\n raise NotImplementedError(\n \"weight_type can either be 'atomic_weight' or 'atomic_number'\"\n )\n return weight\n\n def to_volume(\n self,\n shape: Tuple[int] = None,\n sampling_rate: NDArray = None,\n origin: Tuple[float] = None,\n chain: str = None,\n weight_type: str = \"atomic_weight\",\n scattering_args: Dict = dict(),\n ) -> Tuple[NDArray, Tuple[int], NDArray]:\n \"\"\"\n Converts atom coordinates of shape [n x 3] x, y, z to a volume with\n index z, y, x.\n\n Parameters\n ----------\n shape : Tuple[int, ...], optional\n Desired shape of the output array. If shape is given its expected to be\n in z, y, x form.\n sampling_rate : float, optional\n Sampling rate of the output array in the unit of self.atom_coordinate\n origin : Tuple[float, ...], optional\n Origin of the coordinate system. If origin is given its expected to be\n in z, y, x form.\n chain : str, optional\n The chain identifier. If multiple chains should be selected they need\n to be a comma separated string, e.g. 'A,B,CE'. If chain None,\n all chains are returned. 
Default is None.\n weight_type : str, optional\n Which weight should be given to individual atoms.\n scattering_args : dict, optional\n Additional arguments for scattering factor computation.\n\n Returns\n -------\n Tuple[NDArray, Tuple[int], NDArray]\n The volume, its origin and the voxel size in Ångstrom.\n \"\"\"\n _weight_types = {\n \"atomic_weight\",\n \"atomic_number\",\n \"van_der_waals_radius\",\n \"scattering_factors\",\n \"lowpass_scattering_factors\",\n }\n _weight_string = \",\".join([f\"'{x}'\" for x in _weight_types])\n if weight_type not in _weight_types:\n raise NotImplementedError(f\"weight_type needs to be in {_weight_string}\")\n\n if sampling_rate is None:\n sampling_rate = np.ones(self.atom_coordinate.shape[1])\n sampling_rate = np.array(sampling_rate)\n if sampling_rate.size == 1:\n sampling_rate = np.repeat(sampling_rate, self.atom_coordinate.shape[1])\n elif sampling_rate.size != self.atom_coordinate.shape[1]:\n raise ValueError(\n \"sampling_rate should either be single value of array with\"\n f\"size {self.atom_coordinate.shape[1]}.\"\n )\n if \"source\" not in scattering_args:\n scattering_args[\"source\"] = \"peng1995\"\n\n temp = self.subset_by_chain(chain=chain)\n\n positions, atoms, shape, sampling_rate, origin = temp._coordinate_to_position(\n shape=shape, sampling_rate=sampling_rate, origin=origin\n )\n volume = np.zeros(shape, dtype=np.float32)\n if weight_type in (\"atomic_weight\", \"atomic_number\"):\n weights = temp._get_atom_weights(atoms=atoms, weight_type=weight_type)\n np.add.at(volume, tuple(positions.T), weights)\n elif weight_type == \"van_der_waals_radius\":\n self._position_to_vdw_sphere(positions, atoms, sampling_rate, volume)\n elif weight_type == \"scattering_factors\":\n self._position_to_scattering_factors(\n positions,\n atoms,\n sampling_rate,\n volume,\n lowpass_filter=False,\n **scattering_args,\n )\n elif weight_type == \"lowpass_scattering_factors\":\n self._position_to_scattering_factors(\n positions,\n atoms,\n sampling_rate,\n volume,\n lowpass_filter=True,\n **scattering_args,\n )\n\n self.details.update(temp.details)\n return volume, origin, sampling_rate\n\n @classmethod\n def compare_structures(\n cls,\n structure1: \"Structure\",\n structure2: \"Structure\",\n origin: NDArray = None,\n sampling_rate: float = None,\n weighted: bool = False,\n ) -> float:\n \"\"\"\n Compute root mean square deviation (RMSD) between two structures.\n\n Both structures need to have the same number of atoms. 
In practice, this means\n that *structure2* is a transformed version of *structure1*\n\n Parameters\n ----------\n structure1 : Structure\n Structure 1.\n\n structure2 : Structure\n Structure 2.\n\n origin : NDArray, optional\n Origin of the structure coordinate system.\n\n sampling_rate : float, optional\n Sampling rate if discretized on a grid in the unit of self.atom_coordinate.\n\n weighted : bool, optional\n Whether atoms should be weighted by their atomic weight.\n\n Returns\n -------\n float\n Root Mean Square Deviation (RMSD)\n \"\"\"\n if origin is None:\n origin = np.zeros(structure1.atom_coordinate.shape[1])\n\n coordinates1 = structure1.atom_coordinate\n coordinates2 = structure2.atom_coordinate\n atoms1, atoms2 = structure1.element_symbol, structure2.element_symbol\n if sampling_rate is not None:\n coordinates1 = np.rint((coordinates1 - origin) / sampling_rate).astype(int)\n coordinates2 = np.rint((coordinates2 - origin) / sampling_rate).astype(int)\n\n weights1 = np.array(structure1._get_atom_weights(atoms=atoms1))\n weights2 = np.array(structure2._get_atom_weights(atoms=atoms2))\n if not weighted:\n weights1 = np.ones_like(weights1)\n weights2 = np.ones_like(weights2)\n\n if not np.allclose(coordinates1.shape, coordinates2.shape):\n raise ValueError(\n \"Input structures need to have the same number of coordinates.\"\n )\n if not np.allclose(weights1.shape, weights2.shape):\n raise ValueError(\"Input structures need to have the same number of atoms.\")\n\n squared_diff = np.sum(np.square(coordinates1 - coordinates2), axis=1)\n weighted_quared_diff = squared_diff * ((weights1 + weights2) / 2)\n rmsd = np.sqrt(np.mean(weighted_quared_diff))\n\n return rmsd\n\n @classmethod\n def align_structures(\n cls,\n structure1: \"Structure\",\n structure2: \"Structure\",\n origin: NDArray = None,\n sampling_rate: float = None,\n weighted: bool = False,\n ) -> Tuple[\"Structure\", float]:\n \"\"\"\n Align the atom coordinates of structure2 to structure1 using\n the Kabsch algorithm.\n\n Both structures need to have the same number of atoms. 
In practice, this means\n that *structure2* is a subset of *structure1*\n\n Parameters\n ----------\n structure1 : Structure\n Structure 1.\n\n structure2 : Structure\n Structure 2.\n\n origin : NDArray, optional\n Origin of the structure coordinate system.\n\n sampling_rate : float, optional\n Voxel size if discretized on a grid.\n\n weighted : bool, optional\n Whether atoms should be weighted by their atomic weight.\n\n Returns\n -------\n Structure\n *structure2* aligned to *structure1*.\n float\n Root Mean Square Error (RMSE)\n \"\"\"\n if origin is None:\n origin = np.minimum(\n structure1.atom_coordinate.min(axis=0),\n structure2.atom_coordinate.min(axis=0),\n ).astype(int)\n\n initial_rmsd = cls.compare_structures(\n structure1=structure1,\n structure2=structure2,\n origin=origin,\n sampling_rate=sampling_rate,\n weighted=weighted,\n )\n\n reference = structure1.atom_coordinate.copy()\n query = structure2.atom_coordinate.copy()\n if sampling_rate is not None:\n reference, atoms1, shape, _, _ = structure1._coordinate_to_position(\n shape=None, sampling_rate=sampling_rate, origin=origin\n )\n query, atoms2, shape, _, _ = structure2._coordinate_to_position(\n shape=None, sampling_rate=sampling_rate, origin=origin\n )\n\n reference_mean = reference.mean(axis=0)\n query_mean = query.mean(axis=0)\n\n reference = reference - reference_mean\n query = query - query_mean\n\n corr = np.dot(query.T, reference)\n U, S, Vh = np.linalg.svd(corr)\n\n rotation = np.dot(Vh.T, U.T).T\n if np.linalg.det(rotation) < 0:\n Vh[2, :] *= -1\n rotation = np.dot(Vh.T, U.T).T\n\n translation = reference_mean - np.dot(query_mean, rotation)\n\n temp = structure1.copy()\n temp.atom_coordinate = reference + reference_mean\n ret = structure2.copy()\n ret.atom_coordinate = np.dot(query + query_mean, rotation) + translation\n\n final_rmsd = cls.compare_structures(\n structure1=temp,\n structure2=ret,\n origin=origin,\n sampling_rate=None,\n weighted=weighted,\n )\n\n print(f\"Initial RMSD: {initial_rmsd:.5f} - Final RMSD: {final_rmsd:.5f}\")\n\n return ret, final_rmsd" }, { "identifier": "euler_to_rotationmatrix", "path": "tme/matching_utils.py", "snippet": "def euler_to_rotationmatrix(angles: Tuple[float]) -> NDArray:\n \"\"\"\n Convert Euler angles to a rotation matrix.\n\n Parameters\n ----------\n angles : tuple\n A tuple representing the Euler angles in degrees.\n\n Returns\n -------\n NDArray\n The generated rotation matrix.\n \"\"\"\n if len(angles) == 1:\n angles = (angles, 0, 0)\n rotation_matrix = (\n Rotation.from_euler(\"zyx\", angles, degrees=True).as_matrix().astype(np.float32)\n )\n return rotation_matrix" }, { "identifier": "minimum_enclosing_box", "path": "tme/matching_utils.py", "snippet": "def minimum_enclosing_box(\n coordinates: NDArray,\n margin: NDArray = None,\n use_geometric_center: bool = False,\n) -> Tuple[int]:\n \"\"\"\n Computes the minimal enclosing box around coordinates with margin.\n\n Parameters\n ----------\n coordinates : NDArray\n Coordinates of which the enclosing box should be computed. The shape\n of this array should be [d, n] with d dimensions and n coordinates.\n margin : NDArray, optional\n Box margin. Defaults to None.\n use_geometric_center : bool, optional\n Whether the box should accommodate the geometric or the coordinate\n center. 
Defaults to False.\n\n Returns\n -------\n tuple\n Integers corresponding to the minimum enclosing box shape.\n \"\"\"\n point_cloud = np.asarray(coordinates)\n dim = point_cloud.shape[0]\n point_cloud = point_cloud - point_cloud.min(axis=1)[:, None]\n\n margin = np.zeros(dim) if margin is None else margin\n margin = np.asarray(margin).astype(int)\n\n norm_cloud = point_cloud - point_cloud.mean(axis=1)[:, None]\n # Adding one avoids clipping during scipy.ndimage.affine_transform\n shape = np.repeat(\n np.ceil(2 * np.linalg.norm(norm_cloud, axis=0).max()) + 1, dim\n ).astype(int)\n if use_geometric_center:\n hull = ConvexHull(point_cloud.T)\n distance, _ = max_euclidean_distance(point_cloud[:, hull.vertices].T)\n distance += np.linalg.norm(np.ones(dim))\n shape = np.repeat(np.rint(distance).astype(int), dim)\n\n return shape" } ]
from tempfile import mkstemp
from os import remove
from tme import Structure
from tme.matching_utils import euler_to_rotationmatrix, minimum_enclosing_box
import pytest
import numpy as np
11,743
STRUCTURE_ATTRIBUTES = [
    "record_type",
    "atom_serial_number",
    "atom_name",
    "atom_coordinate",
    "alternate_location_indicator",
    "residue_name",
    "chain_identifier",
    "residue_sequence_number",
    "code_for_residue_insertion",
    "occupancy",
    "temperature_factor",
    "segment_identifier",
    "element_symbol",
    "charge",
    "details",
]


class TestStructure:
    def setup_method(self):
STRUCTURE_ATTRIBUTES = [
    "record_type",
    "atom_serial_number",
    "atom_name",
    "atom_coordinate",
    "alternate_location_indicator",
    "residue_name",
    "chain_identifier",
    "residue_sequence_number",
    "code_for_residue_insertion",
    "occupancy",
    "temperature_factor",
    "segment_identifier",
    "element_symbol",
    "charge",
    "details",
]


class TestStructure:
    def setup_method(self):
self.structure = Structure.from_file("./tme/tests/data/Structures/5khe.cif")
0
2023-10-20 13:46:01+00:00
16k
tonnetonne814/MB-iSTFT-BERT-VITS2-44100-Ja
train_ms.py
[ { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 300)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n return (phones, spec, wav, sid, tone, language, bert, ja_bert)\n\n def get_audio(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n 
else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert = torch.load(bert_path)\n assert bert.shape[-1] == len(phone)\n except:\n bert = get_bert(text, word2ph, language_str, device=\"cuda\")\n torch.save(bert, bert_path)\n assert bert.shape[-1] == len(phone), phone\n\n if language_str == \"ZH\":\n bert = bert\n ja_bert = torch.zeros(768, len(phone))\n elif language_str == \"JP\":\n ja_bert = bert\n bert = torch.zeros(1024, len(phone))\n else:\n bert = torch.zeros(1024, len(phone))\n ja_bert = torch.zeros(768, len(phone))\n assert bert.shape[-1] == len(phone), (\n bert.shape,\n len(phone),\n sum(word2ph),\n p1,\n p2,\n t1,\n t2,\n pold,\n pold2,\n word2ph,\n text,\n w2pho,\n )\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 768, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n 
spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * 
self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=6,\n flow_share_parameter=False,\n use_transformer_flow=True,\n subbands=8, # add\n gen_istft_n_fft=16, # add\n gen_istft_hop_size=4, # add\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.subbands = subbands\n self.gen_istft_n_fft = gen_istft_n_fft\n self.gen_istft_hop_size = gen_istft_hop_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels,\n )\n # self.dec = Generator(\n # inter_channels,\n # resblock,\n # resblock_kernel_sizes,\n # resblock_dilation_sizes,\n # upsample_rates,\n # upsample_initial_channel,\n # upsample_kernel_sizes,\n # gin_channels=gin_channels,\n # )\n self.dec = Multistream_iSTFT_Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gen_istft_n_fft,\n gen_istft_hop_size, \n subbands,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n 
spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers > 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert, ja_bert):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n 
sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x 
+ self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n 
fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import os
import torch
import torch.distributed as dist
import logging
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
)
from losses import generator_loss, discriminator_loss, feature_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
11,061
else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, ) in tqdm(enumerate(train_loader)): if net_g.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.mas_noise_scale_initial - net_g.noise_scale_delta * global_step ) net_g.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda( rank, non_blocking=True ) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( rank, non_blocking=True ) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( rank, non_blocking=True ) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) ja_bert = ja_bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length )
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cudnn.benchmark = True torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 torch.backends.cuda.enable_math_sdp(True) global_step = 0 def run(): #dist.init_process_group( # backend="gloo", # init_method="env://", # Due to some training problem,we proposed to use gloo instead of nccl. #) # Use torchrun instead of mp.spawn #rank = dist.get_rank() #n_gpus = dist.get_world_size() rank = 0 n_gpus = 1 hps = utils.get_hparams() torch.manual_seed(hps.train.seed) torch.cuda.set_device(rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=16, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(rank) net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, 
betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None # net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) # if net_dur_disc is not None: # net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) try: if net_dur_disc is not None: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: # if not optim_dur_disc.param_groups[0].get("initial_lr"): # optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, ) in tqdm(enumerate(train_loader)): if net_g.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.mas_noise_scale_initial - net_g.noise_scale_delta * global_step ) net_g.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda( rank, 
non_blocking=True ) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( rank, non_blocking=True ) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( rank, non_blocking=True ) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) ja_bert = ja_bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length )
y_hat_mel = mel_spectrogram_torch(
10
2023-10-16 10:04:32+00:00
16k
violet-sto/HN-GFN
proxy/proxy.py
[ { "identifier": "Regressor", "path": "proxy/regression.py", "snippet": "class Regressor(nn.Module):\n def __init__(self, args, nhid, nvec, num_out_per_stem, num_out_per_mol, num_conv_steps, version, dropout_rate=0, do_stem_mask=True, do_nblocks=False):\n nn.Module.__init__(self)\n self.args = args\n self.training_steps = 0\n # atomfeats + stem_mask + atom one hot + nblocks\n num_feat = (14 + int(do_stem_mask) +\n len(atomic_numbers) + int(do_nblocks))\n\n self.proxy = MPNNet_v2(\n num_feat=num_feat,\n num_vec=nvec,\n dim=nhid,\n num_out_per_mol=num_out_per_mol,\n num_out_per_stem=num_out_per_stem,\n num_conv_steps=num_conv_steps,\n version=version,\n dropout_rate=dropout_rate)\n\n def fit(self, dataset, opt):\n last_losses = []\n train_losses = []\n test_losses = []\n time_start = time.time()\n time_last_check = time.time()\n best_test_loss = 1000\n mbsize = self.args.proxy_mbsize\n early_stop_tol = self.args.proxy_early_stop_tol\n early_stop_count = 0\n\n self.proxy.train()\n for i in range(self.args.proxy_num_iterations+1):\n s, r = dataset.sample2batch(dataset.sample(mbsize))\n # s.x s.edge_index, s.edge_attr, s.stems: stem_atmidxs\n\n stem_out_s, mol_out_s = self.proxy(s, None, do_stems=False)\n loss = F.mse_loss(mol_out_s, r)\n last_losses.append((loss.item(),))\n train_losses.append((loss.item(),))\n opt.zero_grad()\n loss.backward()\n opt.step()\n self.proxy.training_steps = i + 1\n\n if not i % 50:\n train_loss = [np.round(np.mean(i), 4)\n for i in zip(*last_losses)]\n last_losses = []\n\n total_test_loss = 0\n\n self.proxy.eval()\n for s, r in dataset.iterset(max(mbsize, 64), mode='test'):\n with torch.no_grad():\n stem_o, mol_o = self.proxy(s, None, do_stems=False)\n loss = F.mse_loss(mol_o, r, reduction='sum')\n total_test_loss += loss.item()\n self.proxy.train()\n\n test_loss = total_test_loss / \\\n (len(dataset.test_mols)*len(self.args.objectives))\n test_losses.append(test_loss)\n print('Iter {}: Train Loss {}, Test Loss {}, Time {}'.format(\n i, train_loss[0], round(test_loss, 4), round(time.time() - time_last_check, 3)))\n time_last_check = time.time()\n\n if test_loss < best_test_loss:\n best_test_loss = test_loss\n best_model = deepcopy(self.proxy)\n best_model.to('cpu')\n early_stop_count = 0\n if self.args.save:\n self.save(self.args.log_dir)\n\n else:\n early_stop_count += 1\n print('Early stop count: {}'.format(early_stop_count))\n\n if early_stop_count >= early_stop_tol:\n print('Early stopping! 
Training time: {}, Best test loss: {}'.format(\n time.time()-time_start, best_test_loss))\n break\n\n self.proxy = deepcopy(best_model)\n self.proxy.to(self.args.device)\n\n def forward(self, graph, vec=None, do_stems=True, do_bonds=False, k=None):\n return self.proxy(graph, vec, do_stems=do_stems, do_bonds=do_bonds, k=k)\n\n def posterior(self, x):\n self.proxy.eval()\n with torch.no_grad():\n outputs = self.forward(x, None, do_stems=False)[1].squeeze(0)\n\n posterior = MyPosterior(outputs, torch.zeros_like(outputs))\n\n return posterior\n\n def save(self, checkpoint_dir):\n checkpoint_path = os.path.join(\n checkpoint_dir, f\"proxy_init_checkpoint.pth\")\n torch.save(self.proxy.state_dict(), checkpoint_path)" }, { "identifier": "DropoutRegressor", "path": "proxy/regression.py", "snippet": "class DropoutRegressor(Regressor):\n def __init__(self, args, nhid, nvec, num_out_per_stem, num_out_per_mol, num_conv_steps, version, \\\n dropout_rate=0, num_dropout_samples=25, do_stem_mask=True, do_nblocks=False):\n super().__init__(args, nhid, nvec, num_out_per_stem, num_out_per_mol,\n num_conv_steps, version, dropout_rate, do_stem_mask, do_nblocks)\n self.proxy_num_dropout_samples = num_dropout_samples\n\n def posterior(self, x):\n self.proxy.train()\n with torch.no_grad():\n outputs = torch.cat([self.forward(x, None, do_stems=False)[1].unsqueeze(0)\n for _ in range(self.proxy_num_dropout_samples)])\n\n posterior = MyPosterior(outputs.mean(dim=0), outputs.var(dim=0))\n\n return posterior" }, { "identifier": "EvidentialRegressor", "path": "proxy/regression.py", "snippet": "class EvidentialRegressor(nn.Module):\n def __init__(self, args, nhid, nvec, num_out_per_stem, num_out_per_mol, num_conv_steps, version, dropout_rate=0, do_stem_mask=True, do_nblocks=False):\n nn.Module.__init__(self)\n self.args = args\n self.training_steps = 0\n # atomfeats + stem_mask + atom one hot + nblocks\n num_feat = (14 + int(do_stem_mask) +\n len(atomic_numbers) + int(do_nblocks))\n\n self.proxy = MPNNet_v2(\n num_feat=num_feat,\n num_vec=nvec,\n dim=nhid,\n num_out_per_mol=num_out_per_mol*4,\n num_out_per_stem=num_out_per_stem,\n num_conv_steps=num_conv_steps,\n version=version,\n dropout_rate=dropout_rate)\n\n def fit(self, dataset, opt, mean, std, round_idx):\n self.mean = mean\n self.std = std\n \n last_losses = []\n train_losses = []\n test_losses = []\n time_start = time.time()\n time_last_check = time.time()\n best_test_loss = 1000\n mbsize = self.args.proxy_mbsize\n early_stop_tol = self.args.proxy_early_stop_tol\n early_stop_count = 0\n \n stop_event = threading.Event()\n sampler = dataset.start_samplers(1, mbsize)\n\n def stop_everything():\n stop_event.set()\n print('joining')\n dataset.stop_samplers_and_join()\n\n self.proxy.train()\n for i in range(self.args.proxy_num_iterations+1):\n r = sampler()\n for thread in dataset.sampler_threads:\n if thread.failed:\n stop_event.set()\n stop_everything()\n pdb.post_mortem(thread.exception.__traceback__)\n s, r = r\n r = (r - mean) / std\n # s.x s.edge_index, s.edge_attr, s.stems: stem_atmidxs\n \n # if bounds is not None:\n # r = normalize(r, bounds)\n means, lambdas, alphas, betas = self.forward(s, None, do_stems=False)\n # the larger the lam, the larger the variance\n loss = evidential_loss(means, lambdas, alphas, betas, r, lam=self.args.evidential_lam).mean()\n last_losses.append((loss.item(),))\n train_losses.append((loss.item(),))\n opt.zero_grad()\n loss.backward()\n opt.step()\n self.proxy.training_steps = i + 1\n\n if not i % 50:\n train_loss = 
[np.round(np.mean(i), 4)\n for i in zip(*last_losses)]\n last_losses = []\n\n total_test_loss = 0\n total_normalize_test_loss = 0\n\n self.proxy.eval()\n for s, r in dataset.iterset(max(mbsize, 64), mode='test'):\n with torch.no_grad():\n means, lambdas, alphas, betas = self.forward(s, None, do_stems=False)\n # if bounds is not None:\n # means = unnormalize(means, bounds)\n normalize_loss = F.mse_loss(means, (r-mean)/std, reduction='sum')\n total_normalize_test_loss += normalize_loss.item()\n means = means * std + mean\n loss = F.mse_loss(means, r, reduction='sum')\n total_test_loss += loss.item()\n self.proxy.train()\n\n test_loss = total_test_loss / \\\n (len(dataset.test_mols)*len(self.args.objectives))\n normalize_test_loss = total_normalize_test_loss / \\\n (len(dataset.test_mols)*len(self.args.objectives))\n test_losses.append(test_loss)\n print('Iter {}: Train Loss {}, Test Loss {}, Normalize Test Loss {}, Time {}'.format(\n i, train_loss[0], round(test_loss, 4), round(normalize_test_loss, 4), round(time.time() - time_last_check, 3)))\n time_last_check = time.time()\n\n if normalize_test_loss < best_test_loss:\n best_test_loss = normalize_test_loss\n best_model = deepcopy(self.proxy)\n best_model.to('cpu')\n early_stop_count = 0\n if self.args.save:\n self.save(self.args.log_dir, round_idx)\n\n else:\n early_stop_count += 1\n print('Early stop count: {}'.format(early_stop_count))\n\n if early_stop_count >= early_stop_tol:\n print('Early stopping! Training time: {}, Best test loss: {}'.format(\n time.time()-time_start, best_test_loss))\n break\n \n stop_everything()\n self.proxy = deepcopy(best_model)\n self.proxy.to(self.args.device)\n\n def forward(self, graph, vec=None, do_stems=True, do_bonds=False, k=None):\n _, mol_out_s = self.proxy(graph, vec, do_stems=do_stems,\n do_bonds=do_bonds, k=k)\n min_val = 1e-6\n means, loglambdas, logalphas, logbetas = torch.split(\n mol_out_s, mol_out_s.shape[1]//4, dim=1)\n lambdas = F.softplus(loglambdas) + min_val\n alphas = F.softplus(logalphas) + min_val + 1 # add 1 for numerical contraints of Gamma function\n betas = F.softplus(logbetas) + min_val\n\n return means, lambdas, alphas, betas\n \n def posterior(self, X, posterior_transform=None):\n self.proxy.eval()\n with torch.no_grad():\n means, lambdas, alphas, betas = self.forward(X, None, do_stems=False)\n inverse_evidence = 1. 
/ ((alphas-1) * lambdas)\n vars = betas * inverse_evidence\n \n means = means * self.std + self.mean\n vars = vars * self.std ** 2\n \n # vars = BlockDiagLazyTensor(torch.diag(vars.squeeze()).unsqueeze(0))\n covariance_matrix = lazify(torch.diag(vars.squeeze()))\n mvn = MultitaskMultivariateNormal(means, covariance_matrix)\n \n posterior = GPyTorchPosterior(mvn)\n\n if posterior_transform is not None:\n return posterior_transform(posterior) \n return posterior\n\n def save(self, checkpoint_dir, round_idx):\n checkpoint_path = os.path.join(\n checkpoint_dir, f\"{round_idx}_proxy_checkpoint.pth\")\n torch.save(self.proxy.state_dict(), checkpoint_path)" }, { "identifier": "EnsembleRegressor", "path": "proxy/regression.py", "snippet": "class EnsembleRegressor(nn.Module):\n def __init__(self, args, nhid, nvec, num_out_per_stem, num_out_per_mol, num_conv_steps, version, \\\n dropout_rate=0, num_dropout_samples=5, do_stem_mask=True, do_nblocks=False):\n nn.Module.__init__(self)\n self.training_steps = 0\n # atomfeats + stem_mask + atom one hot + nblocks\n num_feat = (14 + int(do_stem_mask) +\n len(atomic_numbers) + int(do_nblocks))\n self.proxy_num_dropout_samples = num_dropout_samples\n self.args = args\n self.device = args.device\n self.proxy = [MPNNet_v2(\n num_feat=num_feat,\n num_vec=nvec,\n dim=nhid,\n num_out_per_mol=num_out_per_mol,\n num_out_per_stem=num_out_per_stem,\n num_conv_steps=num_conv_steps,\n version=version,\n dropout_rate=dropout_rate).to(self.device) \\\n for _ in range(self.proxy_num_dropout_samples)]\n \n def fit(self, dataset, opt, mean, std, round_idx):\n self.mean = mean\n self.std = std\n \n last_losses = []\n train_losses = []\n test_losses = []\n time_start = time.time()\n time_last_check = time.time()\n best_test_loss = 1000\n mbsize = self.args.proxy_mbsize\n early_stop_tol = self.args.proxy_early_stop_tol\n early_stop_count = 0\n \n for i in range(self.args.proxy_num_iterations+1):\n s, r = dataset.sample2batch(dataset.sample(mbsize))\n r = (r - mean) / std # (batch_size, num_obj)\n mol_out_s = self._call_models_train(s).mean(0)\n \n loss = F.mse_loss(mol_out_s, r)\n last_losses.append((loss.item(),))\n train_losses.append((loss.item(),))\n opt.zero_grad()\n loss.backward()\n opt.step()\n self.training_steps = i + 1\n \n if not i % 50:\n train_loss = [np.round(np.mean(i), 4)\n for i in zip(*last_losses)]\n last_losses = []\n\n total_test_loss = 0\n\n for s, r in dataset.iterset(max(mbsize, 64), mode='test'):\n with torch.no_grad():\n mol_o = self._call_models_eval(s).mean(0)\n loss = F.mse_loss(mol_o, (r-mean)/std, reduction='sum')\n total_test_loss += loss.item()\n\n test_loss = total_test_loss / \\\n (len(dataset.test_mols)*len(self.args.objectives))\n test_losses.append(test_loss)\n print('Iter {}: Train Loss {}, Test Loss {}, Time {}'.format(\n i, train_loss[0], round(test_loss, 4), round(time.time() - time_last_check, 3)))\n time_last_check = time.time()\n\n if test_loss < best_test_loss:\n best_test_loss = test_loss\n # best_model = deepcopy(self.proxy)\n # best_model.to('cpu')\n best_params = [[i.data.cpu().numpy() for i in model.parameters()] for model in self.proxy]\n early_stop_count = 0\n if self.args.save:\n self.save(self.args.log_dir, round_idx)\n\n else:\n early_stop_count += 1\n print('Early stop count: {}'.format(early_stop_count))\n\n if early_stop_count >= early_stop_tol:\n print('Early stopping! 
Training time: {}, Best test loss: {}'.format(\n time.time()-time_start, best_test_loss))\n break\n \n # load best parameters \n for i, model in enumerate(self.proxy):\n for i, besti in zip(model.parameters(), best_params[i]):\n i.data = torch.tensor(besti).to(self.device)\n # self.args.logger.save(self.args.save_path, self.args)\n \n def _call_models_train(self, x):\n for model in self.proxy:\n model.train()\n ys = torch.stack([model(x, None, do_stems=False)[1] for model in self.proxy], dim=0) # (5, 64, 2)\n return ys\n \n def _call_models_eval(self, x):\n for model in self.proxy:\n model.eval()\n ys = torch.stack([model(x, None, do_stems=False)[1] for model in self.proxy], dim=0)\n return ys\n \n def posterior(self, x):\n with torch.no_grad():\n outputs = self._call_models_eval(x)\n posterior = MyPosterior(outputs.mean(dim=0), outputs.var(dim=0))\n posterior.mean = posterior.mean * self.std + self.mean\n posterior.variance = posterior.variance * self.std ** 2\n return posterior\n \n def save(self, checkpoint_dir, round_idx):\n for i, model in enumerate(self.proxy):\n checkpoint_path = os.path.join(checkpoint_dir, f\"{round_idx}_proxy_checkpoint_model_{i}.pth\")\n torch.save(model.state_dict(), checkpoint_path) " }, { "identifier": "GPRegressor", "path": "proxy/regression.py", "snippet": "class GPRegressor(nn.Module):\n def __init__(self, args, nhid, nvec, num_out_per_stem, num_out_per_mol, num_conv_steps, version, \\\n dropout_rate=0, num_dropout_samples=5, do_stem_mask=True, do_nblocks=False):\n nn.Module.__init__(self)\n self.training_steps = 0\n # atomfeats + stem_mask + atom one hot + nblocks\n num_feat = (14 + int(do_stem_mask) +\n len(atomic_numbers) + int(do_nblocks))\n self.proxy_num_dropout_samples = num_dropout_samples\n self.args = args\n self.device = args.device\n # self.objective = AnalyticMultiOutputObjective()\n self.NP_DTYPE = np.float32\n fingerprint_func = functools.partial(\n rdMolDescriptors.GetMorganFingerprintAsBitVect,\n radius=self.args.fp_radius,\n nBits=self.args.fp_nbits,\n )\n self.my_smiles_to_fp_array = functools.partial(\n smiles_to_fp_array, fingerprint_func=fingerprint_func\n )\n \n def fit(self, dataset):\n x_train = np.stack([self.my_smiles_to_fp_array(s) for s in dataset.smis]).astype(self.NP_DTYPE) # (200, 1024)\n y_train = pd.DataFrame.from_dict(dataset.scores).values.astype(self.NP_DTYPE) # (200, num_obj)\n x_train = torch.as_tensor(x_train)\n y_train = torch.as_tensor(y_train)\n self.proxy = self.get_trained_gp(X_train=x_train, y_train=y_train).to(self.device)\n \n def get_trained_gp(self, X_train, y_train):\n models = []\n for i in range(y_train.shape[-1]):\n obj = y_train[:, i]\n models.append(TanimotoGP(train_x=X_train, train_y=obj)) # input should be tensor\n model = ModelListGP(*models)\n \n fit_gp_hyperparameters(model)\n \n return model\n \n def posterior(self, x): \n x = self.my_smiles_to_fp_array(Chem.MolToSmiles(x.mol))\n x = torch.as_tensor(x).unsqueeze(0).to(self.device)\n with torch.no_grad():\n posterior = self.proxy.posterior(x) #! oracle scale\n return posterior" }, { "identifier": "MolMDPExtended", "path": "mol_mdp_ext.py", "snippet": "class MolMDPExtended(MolMDP):\n\n def build_translation_table(self):\n \"\"\"build a symmetry mapping for blocks. Necessary to compute parent transitions\"\"\"\n self.translation_table = {}\n for blockidx in range(len(self.block_mols)):\n # Blocks have multiple ways of being attached. 
By default,\n # a new block is attached to the target stem by attaching\n # it's kth atom, where k = block_rs[new_block_idx][0].\n # When computing a reverse action (from a parent), we may\n # wish to attach the new block to a different atom. In\n # the blocks library, there are duplicates of the same\n # block but with block_rs[block][0] set to a different\n # atom. Thus, for the reverse action we have to find out\n # which duplicate this corresponds to.\n\n # Here, we compute, for block blockidx, what is the index\n # of the duplicate block, if someone wants to attach to\n # atom x of the block.\n # So atom_map[x] == bidx, such that block_rs[bidx][0] == x\n atom_map = {}\n for j in range(len(self.block_mols)):\n if self.block_smi[blockidx] == self.block_smi[j]:\n atom_map[self.block_rs[j][0]] = j\n self.translation_table[blockidx] = atom_map\n\n # We're still missing some \"duplicates\", as some might be\n # symmetric versions of each other. For example, block CC with\n # block_rs == [0,1] has no duplicate, because the duplicate\n # with block_rs [1,0] would be a symmetric version (both C\n # atoms are the \"same\").\n\n # To test this, let's create nonsense molecules by attaching\n # duplicate blocks to a Gold atom, and testing whether they\n # are the same.\n gold = Chem.MolFromSmiles('[Au]')\n # If we find that two molecules are the same when attaching\n # them with two different atoms, then that means the atom\n # numbers are symmetries. We can add those to the table.\n for blockidx in range(len(self.block_mols)):\n for j in self.block_rs[blockidx]:\n if j not in self.translation_table[blockidx]:\n symmetric_duplicate = None\n for atom, block_duplicate in self.translation_table[blockidx].items():\n molA, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,j]],\n frags=[gold, self.block_mols[blockidx]])\n molB, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,atom]],\n frags=[gold, self.block_mols[blockidx]])\n if (Chem.MolToSmiles(molA) == Chem.MolToSmiles(molB) or\n molA.HasSubstructMatch(molB)):\n symmetric_duplicate = block_duplicate\n break\n if symmetric_duplicate is None:\n raise ValueError('block', blockidx, self.block_smi[blockidx],\n 'has no duplicate for atom', j,\n 'in position 0, and no symmetrical correspondance')\n self.translation_table[blockidx][j] = symmetric_duplicate\n #print('block', blockidx, '+ atom', j,\n # 'in position 0 is a symmetric duplicate of',\n # symmetric_duplicate)\n\n def parents(self, mol=None):\n \"\"\"returns all the possible parents of molecule mol (or the current\n molecule if mol is None.\n\n Returns a list of (BlockMoleculeDataExtended, (block_idx, stem_idx)) pairs such that\n for a pair (m, (b, s)), MolMDPExtended.add_block_to(m, b, s) == mol.\n \"\"\"\n if len(mol.blockidxs) == 1:\n # If there's just a single block, then the only parent is\n # the empty block with the action that recreates that block\n return [(BlockMoleculeDataExtended(), (mol.blockidxs[0], 0))]\n\n # Compute the how many blocks each block is connected to\n blocks_degree = defaultdict(int)\n for a,b,_,_ in mol.jbonds:\n blocks_degree[a] += 1\n blocks_degree[b] += 1\n # Keep only blocks of degree 1 (those are the ones that could\n # have just been added)\n blocks_degree_1 = [i for i, d in blocks_degree.items() if d == 1]\n # Form new molecules without these blocks\n parent_mols = []\n\n for rblockidx in blocks_degree_1:\n new_mol = mol.copy()\n # find which bond we're removing\n removed_bonds = [(jbidx, bond) for jbidx, bond in enumerate(new_mol.jbonds)\n if rblockidx in bond[:2]]\n 
assert len(removed_bonds) == 1\n rjbidx, rbond = removed_bonds[0]\n # Pop the bond\n new_mol.jbonds.pop(rjbidx)\n # Remove the block\n mask = np.ones(len(new_mol.blockidxs), dtype=np.bool)\n mask[rblockidx] = 0\n reindex = new_mol.delete_blocks(mask)\n # reindex maps old blockidx to new blockidx, since the\n # block the removed block was attached to might have its\n # index shifted by 1.\n\n # Compute which stem the bond was using\n stem = ([reindex[rbond[0]], rbond[2]] if rblockidx == rbond[1] else\n [reindex[rbond[1]], rbond[3]])\n # and add it back\n new_mol.stems = [list(i) for i in new_mol.stems] + [stem]\n #new_mol.stems.append(stem)\n # and we have a parent. The stem idx to recreate mol is\n # the last stem, since we appended `stem` in the back of\n # the stem list.\n # We also have to translate the block id to match the bond\n # we broke, see build_translation_table().\n removed_stem_atom = (\n rbond[3] if rblockidx == rbond[1] else rbond[2])\n blockid = mol.blockidxs[rblockidx]\n if removed_stem_atom not in self.translation_table[blockid]:\n raise ValueError('Could not translate removed stem to duplicate or symmetric block.')\n parent_mols.append([new_mol,\n # action = (block_idx, stem_idx)\n (self.translation_table[blockid][removed_stem_atom],\n len(new_mol.stems) - 1)])\n if not len(parent_mols):\n raise ValueError('Could not find any parents')\n return parent_mols\n\n\n def add_block_to(self, mol, block_idx, stem_idx=None, atmidx=None):\n '''out-of-place version of add_block'''\n #assert (block_idx >= 0) and (block_idx <= len(self.block_mols)), \"unknown block\"\n if mol.numblocks == 0:\n stem_idx = None\n new_mol = mol.copy()\n new_mol.add_block(block_idx,\n block=self.block_mols[block_idx],\n block_r=self.block_rs[block_idx],\n stem_idx=stem_idx, atmidx=atmidx)\n return new_mol\n\n def remove_jbond_from(self, mol, jbond_idx=None, atmidx=None):\n new_mol = mol.copy()\n new_mol.remove_jbond(jbond_idx, atmidx)\n return new_mol\n\n def a2mol(self, acts):\n mol = BlockMoleculeDataExtended()\n for i in acts:\n if i[0] >= 0:\n mol = self.add_block_to(mol, *i)\n return mol\n\n def reset(self):\n self.molecule = BlockMoleculeDataExtended()\n return None\n\n\n def post_init(self, device, repr_type, include_bonds=False, include_nblocks=False):\n self.device = device\n self.repr_type = repr_type\n #self.max_bond_atmidx = max([max(i) for i in self.block_rs])\n self.max_num_atm = max(self.block_natm)\n # see model_block.mol2graph\n self.true_block_set = sorted(set(self.block_smi))\n self.stem_type_offset = np.int32([0] + list(np.cumsum([\n max(self.block_rs[self.block_smi.index(i)])+1 for i in self.true_block_set])))\n self.num_stem_types = self.stem_type_offset[-1]\n self.true_blockidx = [self.true_block_set.index(i) for i in self.block_smi]\n self.num_true_blocks = len(self.true_block_set)\n self.include_nblocks = include_nblocks\n self.include_bonds = include_bonds\n #print(self.max_num_atm, self.num_stem_types)\n self.molcache = {}\n\n def mols2batch(self, mols):\n if self.repr_type == 'block_graph':\n return model_block.mols2batch(mols, self)\n elif self.repr_type == 'atom_graph':\n return model_atom.mols2batch(mols, self)\n elif self.repr_type == 'morgan_fingerprint':\n return model_fingerprint.mols2batch(mols, self)\n\n def mol2repr(self, mol=None):\n if mol is None:\n mol = self.molecule\n #molhash = str(mol.blockidxs)+':'+str(mol.stems)+':'+str(mol.jbonds)\n #if molhash in self.molcache:\n # return self.molcache[molhash]\n if self.repr_type == 'block_graph':\n r = 
model_block.mol2graph(mol, self, self.floatX)\n elif self.repr_type == 'atom_graph':\n r = model_atom.mol2graph(mol, self, self.floatX,\n bonds=self.include_bonds,\n nblocks=self.include_nblocks)\n elif self.repr_type == 'morgan_fingerprint':\n r = model_fingerprint.mol2fp(mol, self, self.floatX)\n #self.molcache[molhash] = r\n return r\n\n def get_nx_graph(self, mol: BlockMoleculeData, true_block=False):\n true_blockidx = self.true_blockidx\n\n G = nx.DiGraph()\n blockidxs = [true_blockidx[xx] for xx in mol.blockidxs] if true_block else mol.blockidxs\n\n G.add_nodes_from([(ix, {\"block\": blockidxs[ix]}) for ix in range(len(blockidxs))])\n\n if len(mol.jbonds) > 0:\n edges = []\n for jbond in mol.jbonds:\n edges.append((jbond[0], jbond[1],\n {\"bond\": [jbond[2], jbond[3]]}))\n edges.append((jbond[1], jbond[0],\n {\"bond\": [jbond[3], jbond[2]]}))\n G.add_edges_from(edges)\n return G\n\n def graphs_are_isomorphic(self, g1, g2):\n return nx.algorithms.is_isomorphic(g1, g2, node_match=node_match, edge_match=edge_match)" }, { "identifier": "qUpperConfidenceBound", "path": "utils/acq_func.py", "snippet": "class qUpperConfidenceBound(MCAcquisitionFunction):\n r\"\"\"MC-based batch Upper Confidence Bound.\n\n Uses a reparameterization to extend UCB to qUCB for q > 1 (See Appendix A\n of [Wilson2017reparam].)\n\n `qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`\n and `f(X)` has distribution `N(mu, Sigma)`.\n\n Example:\n >>> model = SingleTaskGP(train_X, train_Y)\n >>> sampler = SobolQMCNormalSampler(1024)\n >>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)\n >>> qucb = qUCB(test_X)\n \"\"\"\n\n def __init__(\n self,\n model: Model,\n beta: float,\n sampler: Optional[MCSampler] = None,\n objective: Optional[MCAcquisitionObjective] = None,\n posterior_transform: Optional[PosteriorTransform] = None,\n X_pending: Optional[Tensor] = None,\n ) -> None:\n r\"\"\"q-Upper Confidence Bound.\n\n Args:\n model: A fitted model.\n beta: Controls tradeoff between mean and standard deviation in UCB.\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n posterior_transform: A PosteriorTransform (optional).\n X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation but have not yet\n been evaluated. Concatenated into X upon forward call. 
Copied and set to\n have no gradient.\n \"\"\"\n super().__init__(\n model=model,\n sampler=sampler,\n objective=objective,\n posterior_transform=posterior_transform,\n X_pending=X_pending,\n )\n self.beta_prime = math.sqrt(beta * math.pi / 2)\n\n @concatenate_pending_points\n @t_batch_mode_transform()\n def forward(self, X: Tensor) -> Tensor:\n r\"\"\"Evaluate qUpperConfidenceBound on the candidate set `X`.\n\n Args:\n X: A `batch_sahpe x q x d`-dim Tensor of t-batches with `q` `d`-dim design\n points each.\n\n Returns:\n A `batch_shape'`-dim Tensor of Upper Confidence Bound values at the given\n design points `X`, where `batch_shape'` is the broadcasted batch shape of\n model and input `X`.\n \"\"\"\n posterior = self.model.posterior(\n X=X, posterior_transform=self.posterior_transform\n )\n samples = self.sampler(posterior)\n obj = self.objective(samples)\n mean = obj.mean(dim=0)\n ucb_samples = mean + self.beta_prime * (obj - mean).abs()\n return ucb_samples.max(dim=-1)[0].mean(dim=0)" }, { "identifier": "qExpectedImprovement", "path": "utils/acq_func.py", "snippet": "class qExpectedImprovement(MCAcquisitionFunction):\n r\"\"\"MC-based batch Expected Improvement.\n\n This computes qEI by\n (1) sampling the joint posterior over q points\n (2) evaluating the improvement over the current best for each sample\n (3) maximizing over q\n (4) averaging over the samples\n\n `qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`\n\n Example:\n >>> model = SingleTaskGP(train_X, train_Y)\n >>> best_f = train_Y.max()[0]\n >>> sampler = SobolQMCNormalSampler(1024)\n >>> qEI = qExpectedImprovement(model, best_f, sampler)\n >>> qei = qEI(test_X)\n \"\"\"\n\n def __init__(\n self,\n model: Model,\n best_f: Union[float, Tensor],\n sampler: Optional[MCSampler] = None,\n objective: Optional[MCAcquisitionObjective] = None,\n posterior_transform: Optional[PosteriorTransform] = None,\n X_pending: Optional[Tensor] = None,\n **kwargs: Any,\n ) -> None:\n r\"\"\"q-Expected Improvement.\n\n Args:\n model: A fitted model.\n best_f: The best objective value observed so far (assumed noiseless). Can be\n a `batch_shape`-shaped tensor, which in case of a batched model\n specifies potentially different values for each element of the batch.\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`\n objective: The MCAcquisitionObjective under which the samples are evaluated.\n Defaults to `IdentityMCObjective()`.\n posterior_transform: A PosteriorTransform (optional).\n X_pending: A `m x d`-dim Tensor of `m` design points that have been\n submitted for function evaluation but have not yet been evaluated.\n Concatenated into X upon forward call. 
Copied and set to have no\n gradient.\n \"\"\"\n super().__init__(\n model=model,\n sampler=sampler,\n objective=objective,\n posterior_transform=posterior_transform,\n X_pending=X_pending,\n )\n self.register_buffer(\"best_f\", torch.as_tensor(best_f, dtype=float))\n\n @concatenate_pending_points\n @t_batch_mode_transform()\n def forward(self, X: Tensor) -> Tensor:\n r\"\"\"Evaluate qExpectedImprovement on the candidate set `X`.\n\n Args:\n X: A `batch_shape x q x d`-dim Tensor of t-batches with `q` `d`-dim design\n points each.\n\n Returns:\n A `batch_shape'`-dim Tensor of Expected Improvement values at the given\n design points `X`, where `batch_shape'` is the broadcasted batch shape of\n model and input `X`.\n \"\"\"\n posterior = self.model.posterior(\n X=X, posterior_transform=self.posterior_transform\n )\n samples = self.sampler(posterior)\n obj = self.objective(samples)\n obj = (obj - self.best_f.unsqueeze(-1).to(obj)).clamp_min(0)\n q_ei = obj.max(dim=-1)[0].mean(dim=0)\n return q_ei" } ]
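The two Monte-Carlo acquisition functions quoted above reduce posterior samples to a scalar score in the same way: evaluate the objective on each sample, apply the acquisition transform, take the maximum over the q candidate points, and average over the MC samples. A minimal sketch of just that reduction on plain tensors (the function names are illustrative, not part of the quoted code; `obj` is assumed to be a `(num_samples, batch, q)` tensor of sampled objective values):

import math
import torch

def qucb_reduce(obj: torch.Tensor, beta: float) -> torch.Tensor:
    # reparameterized qUCB: mu + sqrt(beta * pi / 2) * |y - mu|, max over q, mean over samples
    beta_prime = math.sqrt(beta * math.pi / 2)
    mean = obj.mean(dim=0)                        # (batch, q) MC estimate of the mean
    ucb = mean + beta_prime * (obj - mean).abs()  # per-sample UCB values
    return ucb.max(dim=-1)[0].mean(dim=0)         # (batch,)

def qei_reduce(obj: torch.Tensor, best_f: float) -> torch.Tensor:
    # qEI: improvement over the incumbent, clamped at zero, max over q, mean over samples
    imp = (obj - best_f).clamp_min(0)
    return imp.max(dim=-1)[0].mean(dim=0)         # (batch,)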
import numpy as np import pandas as pd import os import torch import torch.nn as nn import torch.nn.functional as F import time from proxy.regression import Regressor, DropoutRegressor, EvidentialRegressor, EnsembleRegressor, GPRegressor from mol_mdp_ext import MolMDPExtended from botorch.utils.multi_objective.box_decompositions.non_dominated import FastNondominatedPartitioning from botorch.utils.multi_objective.hypervolume import Hypervolume from botorch.acquisition.multi_objective.monte_carlo import qExpectedHypervolumeImprovement from botorch.acquisition.multi_objective.analytic import ExpectedHypervolumeImprovement from botorch.acquisition.analytic import UpperConfidenceBound, ExpectedImprovement from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization from botorch.utils.transforms import normalize, unnormalize from botorch.acquisition.objective import GenericMCObjective from botorch.acquisition.objective import ScalarizedPosteriorTransform from botorch.utils.multi_objective.pareto import is_non_dominated from botorch.sampling.samplers import SobolQMCNormalSampler from sklearn.model_selection import train_test_split from utils.acq_func import qUpperConfidenceBound, qExpectedImprovement from copy import copy, deepcopy
11,173
print('initialize from %s Done!' % self.args.proxy_init_checkpoint) def get_partitioning(self, dataset): ys = [] for s, r in dataset.iterset(self.args.proxy_mbsize, 'train'): y = r ys.append(y) ys = torch.cat(ys, dim=0) self.mean = torch.mean(ys, dim=0, keepdim=True) self.std = torch.std(ys, dim=0, keepdim=True) self.proxy.mean = self.mean self.proxy.std = self.std return FastNondominatedPartitioning(ref_point=self.ref_point, Y=ys) def update(self, dataset, round_idx, reset=False): print("Training surrogate function...") if reset: self.init_model() self.partitioning = self.get_partitioning(dataset) if self.args.proxy_uncertainty == 'GP': self.proxy.fit(dataset) else: self.proxy.fit(dataset, self.opt, self.mean, self.std, round_idx) def __call__(self, m, weights=None): raise NotImplementedError class NoAF(Proxy): def __call__(self, m, weights=None): m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype objective = GenericMCObjective(get_chebyshev_scalarization( weights=weights.squeeze(), Y=torch.zeros(0, len(self.args.objectives)))) mean = self.proxy.posterior(m).mean return ((weights * mean).sum(), mean.squeeze()) class UCB(Proxy): def __init__(self, args, bpath, oracle): super().__init__(args, bpath, oracle) self.beta = args.beta self.sampler = SobolQMCNormalSampler(128) self.score_clip = torch.tensor([0.6, 0.6, 0.7, 0.7]).unsqueeze(0).to(args.device) self.args = args def upper_confidence_bound(self, mu: np.array, var: np.array, beta: float): return mu + (beta * var).sqrt() def __call__(self, m, weights=None): if self.args.proxy_uncertainty != 'GP': m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype Y_bounds = torch.stack([self.partitioning.Y.min( dim=-2).values, self.partitioning.Y.max(dim=-2).values]) posterior = self.proxy.posterior(m) mean = posterior.mean variance = posterior.variance # oracle scale normalize_mean = normalize(mean, Y_bounds) # [0, 1] scale new_mean = normalize_mean.matmul(weights.t()).squeeze() # weighted_sum scalarization new_weights = weights / (Y_bounds[1]-Y_bounds[0]) new_variance = (variance * new_weights**2).sum(1) raw_reward = self.upper_confidence_bound(mu=new_mean, var=new_variance, beta=self.beta) return raw_reward, mean.squeeze() class UCB_chebyshev(Proxy): def __init__(self, args, bpath, oracle): super().__init__(args, bpath, oracle) self.beta = args.beta self.sampler = SobolQMCNormalSampler(128) def __call__(self, m, weights=None): if self.args.proxy_uncertainty != 'GP': m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype Y_bounds = torch.stack([self.partitioning.Y.min( dim=-2).values, self.partitioning.Y.max(dim=-2).values]) objective = GenericMCObjective(get_chebyshev_scalarization( weights=weights.squeeze(), Y=self.partitioning.Y)) posterior = self.proxy.posterior(m) # oracle scale mean = posterior.mean variance = posterior.variance # * chebyshev_scalarization acq_func = qUpperConfidenceBound( model=self.proxy, objective=objective, beta=self.beta, # 0.1 sampler=self.sampler) return (acq_func(m), mean.squeeze()) class EI(Proxy): def __init__(self, args, bpath, oracle): super().__init__(args, bpath, oracle) self.beta = args.beta self.sampler = SobolQMCNormalSampler(128) def __call__(self, m, weights=None): m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype Y_bounds = torch.stack([self.partitioning.Y.min( dim=-2).values, self.partitioning.Y.max(dim=-2).values]) objective = GenericMCObjective(get_chebyshev_scalarization( weights=weights.squeeze(), Y=self.partitioning.Y)) posterior = 
self.proxy.posterior(m) mean = posterior.mean variance = posterior.variance
# from botorch.acquisition.monte_carlo import qUpperConfidenceBound, qExpectedImprovement def make_proxy_model(args, mdp): repr_type = args.proxy_repr_type nemb = args.proxy_nemb num_conv_steps = args.proxy_num_conv_steps model_version = args.proxy_model_version if args.proxy_uncertainty == "none": model = Regressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout) if args.proxy_uncertainty == "dropout": model = DropoutRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout, num_dropout_samples=args.proxy_num_dropout_samples) elif args.proxy_uncertainty == 'ensemble': model = EnsembleRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout, num_dropout_samples=args.proxy_num_dropout_samples) elif args.proxy_uncertainty == 'evidential': model = EvidentialRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout) elif args.proxy_uncertainty == 'GP': model = GPRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout) model.to(args.device) if args.floatX == 'float64': model = model.double() return model def get_proxy(args, bpath, oracle): if args.acq_fn.lower() == 'none': return NoAF(args, bpath, oracle) elif args.acq_fn.lower() == 'ucb': return UCB(args, bpath, oracle) elif args.acq_fn.lower() == 'ucb_chebyshev': return UCB_chebyshev(args, bpath, oracle) elif args.acq_fn.lower() == 'ei': return EI(args, bpath, oracle) class Proxy: def __init__(self, args, bpath, oracle): self.args = args self.ref_point = torch.zeros(len(args.objectives)).to(args.device) self.oracle = oracle self.device = args.device self.mdp = MolMDPExtended(bpath) self.mdp.post_init(args.device, args.proxy_repr_type) if args.floatX == 'float64': self.mdp.floatX = torch.double else: self.mdp.floatX = torch.float self.init_model() def init_model(self): self.proxy = make_proxy_model(self.args, self.mdp) if self.args.proxy_uncertainty == 'ensemble': self.params = sum([list(model.parameters()) for model in self.proxy.proxy], []) self.opt = torch.optim.Adam(self.params, self.args.proxy_learning_rate, weight_decay=self.args.proxy_weight_decay) elif self.args.proxy_uncertainty == 'GP': pass else: self.opt = torch.optim.Adam(self.proxy.parameters(), self.args.proxy_learning_rate, weight_decay=self.args.proxy_weight_decay) def initialize_from_checkpoint(self): checkpoint = torch.load( self.args.proxy_init_checkpoint, map_location=self.device) self.proxy.proxy.load_state_dict(checkpoint) print('initialize from %s Done!' 
% self.args.proxy_init_checkpoint) def get_partitioning(self, dataset): ys = [] for s, r in dataset.iterset(self.args.proxy_mbsize, 'train'): y = r ys.append(y) ys = torch.cat(ys, dim=0) self.mean = torch.mean(ys, dim=0, keepdim=True) self.std = torch.std(ys, dim=0, keepdim=True) self.proxy.mean = self.mean self.proxy.std = self.std return FastNondominatedPartitioning(ref_point=self.ref_point, Y=ys) def update(self, dataset, round_idx, reset=False): print("Training surrogate function...") if reset: self.init_model() self.partitioning = self.get_partitioning(dataset) if self.args.proxy_uncertainty == 'GP': self.proxy.fit(dataset) else: self.proxy.fit(dataset, self.opt, self.mean, self.std, round_idx) def __call__(self, m, weights=None): raise NotImplementedError class NoAF(Proxy): def __call__(self, m, weights=None): m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype objective = GenericMCObjective(get_chebyshev_scalarization( weights=weights.squeeze(), Y=torch.zeros(0, len(self.args.objectives)))) mean = self.proxy.posterior(m).mean return ((weights * mean).sum(), mean.squeeze()) class UCB(Proxy): def __init__(self, args, bpath, oracle): super().__init__(args, bpath, oracle) self.beta = args.beta self.sampler = SobolQMCNormalSampler(128) self.score_clip = torch.tensor([0.6, 0.6, 0.7, 0.7]).unsqueeze(0).to(args.device) self.args = args def upper_confidence_bound(self, mu: np.array, var: np.array, beta: float): return mu + (beta * var).sqrt() def __call__(self, m, weights=None): if self.args.proxy_uncertainty != 'GP': m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype Y_bounds = torch.stack([self.partitioning.Y.min( dim=-2).values, self.partitioning.Y.max(dim=-2).values]) posterior = self.proxy.posterior(m) mean = posterior.mean variance = posterior.variance # oracle scale normalize_mean = normalize(mean, Y_bounds) # [0, 1] scale new_mean = normalize_mean.matmul(weights.t()).squeeze() # weighted_sum scalarization new_weights = weights / (Y_bounds[1]-Y_bounds[0]) new_variance = (variance * new_weights**2).sum(1) raw_reward = self.upper_confidence_bound(mu=new_mean, var=new_variance, beta=self.beta) return raw_reward, mean.squeeze() class UCB_chebyshev(Proxy): def __init__(self, args, bpath, oracle): super().__init__(args, bpath, oracle) self.beta = args.beta self.sampler = SobolQMCNormalSampler(128) def __call__(self, m, weights=None): if self.args.proxy_uncertainty != 'GP': m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype Y_bounds = torch.stack([self.partitioning.Y.min( dim=-2).values, self.partitioning.Y.max(dim=-2).values]) objective = GenericMCObjective(get_chebyshev_scalarization( weights=weights.squeeze(), Y=self.partitioning.Y)) posterior = self.proxy.posterior(m) # oracle scale mean = posterior.mean variance = posterior.variance # * chebyshev_scalarization acq_func = qUpperConfidenceBound( model=self.proxy, objective=objective, beta=self.beta, # 0.1 sampler=self.sampler) return (acq_func(m), mean.squeeze()) class EI(Proxy): def __init__(self, args, bpath, oracle): super().__init__(args, bpath, oracle) self.beta = args.beta self.sampler = SobolQMCNormalSampler(128) def __call__(self, m, weights=None): m = self.mdp.mols2batch([self.mdp.mol2repr(m)]) m.dtype = m.x.dtype Y_bounds = torch.stack([self.partitioning.Y.min( dim=-2).values, self.partitioning.Y.max(dim=-2).values]) objective = GenericMCObjective(get_chebyshev_scalarization( weights=weights.squeeze(), Y=self.partitioning.Y)) posterior = self.proxy.posterior(m) mean = 
posterior.mean variance = posterior.variance
acq_func = qExpectedImprovement(
7
2023-10-24 14:10:35+00:00
16k
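For reference, the UCB proxy in the record above scalarizes its multi-objective posterior before applying the confidence bound: predicted means are normalized to [0, 1] using the observed objective bounds and combined by a weighted sum, and the same weights (rescaled by the bounds range) are pushed through the per-objective variances. A self-contained sketch of that computation, assuming independent objectives; `weighted_ucb` and its signature are illustrative, not the repository's API:

import torch

def weighted_ucb(mean, variance, weights, Y_bounds, beta=0.1):
    # mean, variance: (batch, k) posterior moments on the oracle scale
    # weights: (1, k) preference vector; Y_bounds: (2, k) per-objective min / max
    norm_mean = (mean - Y_bounds[0]) / (Y_bounds[1] - Y_bounds[0])  # map means to [0, 1]
    scalar_mean = norm_mean.matmul(weights.t()).squeeze(-1)         # weighted-sum scalarization
    scaled_w = weights / (Y_bounds[1] - Y_bounds[0])                # weights on the [0, 1] scale
    scalar_var = (variance * scaled_w ** 2).sum(dim=1)              # variance of the weighted sum (diagonal covariance assumed)
    return scalar_mean + (beta * scalar_var).sqrt()                 # mu + sqrt(beta * var)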
SALT-NLP/Efficient_Unlearning
src/models/transformers/parameter-efficient-finetuning/heads/base.py
[ { "identifier": "ImageClassifierOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class ImageClassifierOutput(ModelOutput):\n \"\"\"\n Base class for outputs of image classification models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states\n (also called feature maps) of the model at the output of each stage.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "MultipleChoiceModelOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class MultipleChoiceModelOutput(ModelOutput):\n \"\"\"\n Base class for outputs of multiple choice models.\n\n Args:\n loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided):\n Classification loss.\n logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):\n *num_choices* is the second dimension of the input tensors. 
(see *input_ids* above).\n\n Classification scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "QuestionAnsweringModelOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class QuestionAnsweringModelOutput(ModelOutput):\n \"\"\"\n Base class for outputs of question answering models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Span-start scores (before SoftMax).\n end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Span-end scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n start_logits: torch.FloatTensor = None\n end_logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "Seq2SeqModelOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class Seq2SeqModelOutput(ModelOutput):\n \"\"\"\n Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential\n decoding.\n\n Args:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the decoder of the model.\n\n If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,\n hidden_size)` is output.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, 
returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.\n decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder of the model.\n encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.\n encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n \"\"\"\n\n last_hidden_state: torch.FloatTensor = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "Seq2SeqQuestionAnsweringModelOutput", "path": 
"src/models/transformers/modeling_outputs.py", "snippet": "class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):\n \"\"\"\n Base class for outputs of sequence-to-sequence question answering models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Span-start scores (before SoftMax).\n end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Span-end scores (before SoftMax).\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.\n decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder of the model.\n encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.\n encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the encoder, after the 
attention softmax, used to compute the weighted average in the\n self-attention heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n start_logits: torch.FloatTensor = None\n end_logits: torch.FloatTensor = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "Seq2SeqSequenceClassifierOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class Seq2SeqSequenceClassifierOutput(ModelOutput):\n \"\"\"\n Base class for outputs of sequence-to-sequence sentence classification models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.\n decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder of the model.\n encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when 
`config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.\n encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "SequenceClassifierOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class SequenceClassifierOutput(ModelOutput):\n \"\"\"\n Base class for outputs of sentence classification models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "TokenClassifierOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class TokenClassifierOutput(ModelOutput):\n \"\"\"\n Base class for outputs of token classification models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) :\n Classification loss.\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):\n Classification scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of 
`torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "ModelOutput", "path": "src/models/transformers/utils/generic.py", "snippet": "class ModelOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a `ModelOutput` directly. Use the [`~utils.ModelOutput.to_tuple`] method to convert it to a tuple\n before.\n\n </Tip>\n \"\"\"\n\n def __post_init__(self):\n class_fields = fields(self)\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n if not all(field.default is None for field in class_fields[1:]):\n raise ValueError(f\"{self.__class__.__name__} should not have more than one required field.\")\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and not is_tensor(first_field):\n if isinstance(first_field, dict):\n iterator = first_field.items()\n first_field_iterator = True\n else:\n try:\n iterator = iter(first_field)\n first_field_iterator = True\n except TypeError:\n first_field_iterator = False\n\n # if we provided an iterator as first field and the iterator is a (key, value) iterator\n # set the associated fields\n if first_field_iterator:\n for element in iterator:\n if (\n not isinstance(element, (list, tuple))\n or not len(element) == 2\n or not isinstance(element[0], str)\n ):\n break\n setattr(self, element[0], element[1])\n if element[1] is not None:\n self[element[0]] = element[1]\n elif first_field is not None:\n self[class_fields[0].name] = first_field\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\")\n\n def __getitem__(self, k):\n if isinstance(k, str):\n inner_dict = {k: v for (k, v) in self.items()}\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, 
name, value):\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def to_tuple(self) -> Tuple[Any]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())" }, { "identifier": "AdapterCompositionBlock", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "class AdapterCompositionBlock(Sequence):\n def __init__(self, *children):\n self.children = [parse_composition(b, None) for b in children]\n\n def __getitem__(self, key):\n return self.children[key]\n\n def __len__(self):\n return len(self.children)\n\n def __eq__(self, o: object) -> bool:\n if isinstance(o, type(self)):\n return all([c1 == c2 for c1, c2 in zip(self.children, o.children)])\n else:\n return False\n\n def __repr__(self):\n child_repr = \", \".join(map(str, self.children))\n return f\"{self.__class__.__name__}[{child_repr}]\"\n\n def first(self):\n if not isinstance(self.children[0], AdapterCompositionBlock):\n return self.children[0]\n else:\n return self.children[0].first()\n\n def last(self):\n if not isinstance(self.children[-1], AdapterCompositionBlock):\n return self.children[-1]\n else:\n return self.children[-1].last()\n\n @property\n def parallel_channels(self):\n return max([b.parallel_channels if isinstance(b, AdapterCompositionBlock) else 1 for b in self.children])\n\n def flatten(self) -> Set[str]:\n return set(itertools.chain(*[[b] if isinstance(b, str) else b.flatten() for b in self.children]))" }, { "identifier": "BatchSplit", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "class BatchSplit(AdapterCompositionBlock):\n def __init__(self, *split_adapters: List[Union[AdapterCompositionBlock, str]], batch_sizes: Union[List[int], int]):\n super().__init__(*split_adapters)\n self.batch_sizes = batch_sizes if isinstance(batch_sizes, list) else [batch_sizes] * len(split_adapters)" }, { "identifier": "Parallel", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "class Parallel(AdapterCompositionBlock):\n def __init__(self, *parallel_adapters: List[str]):\n \"\"\"\n Can be used to perform inference for multiple tasks (i.e., adapters) in parallel (for the same input).\n\n See AdapterDrop https://arxiv.org/abs/2010.11918\n \"\"\"\n super().__init__(*parallel_adapters)\n\n @property\n def parallel_channels(self):\n return len(self.children)" }, { "identifier": "parse_heads_from_composition", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "def parse_heads_from_composition(adapter_composition, reference_heads: list = None):\n \"\"\"\n Parses a potential head configuration from a setup of adapters.\n\n Args:\n adapter_composition: The adapter setup to be parsed.\n reference_heads: The list of available to validate the retrieved head configuration against.\n \"\"\"\n final_block = adapter_composition\n if isinstance(final_block, Stack):\n final_block = final_block.children[-1]\n\n if isinstance(final_block, str) and (reference_heads is None or final_block in reference_heads):\n return final_block\n elif isinstance(final_block, Parallel):\n 
return [a if isinstance(a, str) else a.last() for a in final_block.children]\n elif isinstance(final_block, BatchSplit):\n # Convert BatchSplit of adapters to a BatchSplit of heads.\n blocks = [block.last() if isinstance(block, AdapterCompositionBlock) else block for block in final_block]\n head_setup = BatchSplit(*blocks, batch_sizes=final_block.batch_sizes)\n if reference_heads is None or all(head in reference_heads for head in head_setup):\n return head_setup\n else:\n raise ValueError(\n \"Missing at least one head for the given BatchSplit setup. Expected heads: {}\".format(blocks)\n )\n else:\n return None" }, { "identifier": "AdapterSetup", "path": "src/models/transformers/parameter-efficient-finetuning/context.py", "snippet": "class AdapterSetup:\n \"\"\"\n Represents an adapter setup of a model including active adapters and active heads. This class is intended to be\n used as a context manager using the ``with`` statement. The setup defined by the ``AdapterSetup`` context will\n override static adapter setups defined in a model (i.e. setups specified via ``active_adapters``).\n\n Example::\n\n with AdapterSetup(Stack(\"a\", \"b\")):\n # will use the adapter stack \"a\" and \"b\" outputs = model(**inputs)\n\n Note that the context manager is thread-local, i.e. it can be used with different setups in a multi-threaded\n environment.\n \"\"\"\n\n # thread-local storage that holds a stack of active contexts\n storage = threading.local()\n\n def __init__(self, adapter_setup, head_setup=None, ignore_empty: bool = False):\n self.adapter_setup = parse_composition(adapter_setup)\n if head_setup:\n self.head_setup = head_setup\n else:\n self.head_setup = parse_heads_from_composition(self.adapter_setup)\n self._empty = ignore_empty and self.adapter_setup is None and self.head_setup is None\n\n def __enter__(self):\n if not self._empty:\n AdapterSetup.get_contexts().append(self)\n return self\n\n def __exit__(self, type, value, traceback):\n if not self._empty:\n AdapterSetup.get_contexts().pop()\n\n @classmethod\n def get_contexts(cls):\n if not hasattr(cls.storage, \"contexts\"):\n cls.storage.contexts = []\n return cls.storage.contexts\n\n @classmethod\n def get_context(cls):\n try:\n return cls.get_contexts()[-1]\n except IndexError:\n return None\n\n @classmethod\n def get_context_adapter_setup(cls):\n context = cls.get_context()\n if context:\n return context.adapter_setup\n return None\n\n @classmethod\n def get_context_head_setup(cls):\n context = cls.get_context()\n if context:\n return context.head_setup\n return None" }, { "identifier": "ForwardContext", "path": "src/models/transformers/parameter-efficient-finetuning/context.py", "snippet": "class ForwardContext:\n \"\"\"\n Holds context information during a forward pass through a model. 
This class should be used via the\n ``ForwardContext.wrap()`` method.\n\n Note that the context is thread-local.\n \"\"\"\n\n # thread-local storage that holds a stack of active contexts\n storage = threading.local()\n\n context_attributes = [\"adapter_gating_scores\", \"adapter_fusion_attentions\", \"adapter_input_parallelized\"]\n\n def __init__(self, model, *args, **kwargs):\n # If the model has a method ``forward_context()``, use it to create the context.\n if hasattr(model, \"forward_context\"):\n model.forward_context(self, *args, **kwargs)\n\n def __enter__(self):\n ForwardContext.get_contexts().append(self)\n return self\n\n def __exit__(self, type, value, traceback):\n ForwardContext.get_contexts().pop()\n\n @classmethod\n def wrap(cls, f):\n \"\"\"\n Decorator method that wraps a ``forward()`` function of a model class.\n \"\"\"\n\n @functools.wraps(f)\n def wrapper_func(self, *args, **kwargs):\n if self.config.adapters is not None:\n with cls(self, *args, **kwargs) as ctx:\n kwargs = {\n k: v for k, v in kwargs.items() if k.replace(\"output_\", \"\") not in cls.context_attributes\n }\n results = f(self, *args, **kwargs)\n\n # append output attributes\n if isinstance(results, tuple):\n for attr in cls.context_attributes:\n if getattr(ctx, \"output_\" + attr, False):\n results = results + (dict(getattr(ctx, attr)),)\n else:\n for attr in cls.context_attributes:\n if getattr(ctx, \"output_\" + attr, False):\n results[attr] = dict(getattr(ctx, attr))\n return results\n else:\n return f(self, *args, **kwargs)\n\n return wrapper_func\n\n @classmethod\n def get_contexts(cls):\n if not hasattr(cls.storage, \"contexts\"):\n cls.storage.contexts = []\n return cls.storage.contexts\n\n @classmethod\n def get_context(cls):\n try:\n return cls.get_contexts()[-1]\n except IndexError:\n return None" }, { "identifier": "ModelWithHeadsAdaptersMixin", "path": "src/models/transformers/parameter-efficient-finetuning/model_mixin.py", "snippet": "class ModelWithHeadsAdaptersMixin(ModelAdaptersMixin):\n \"\"\"\n Mixin adding support for loading/ saving adapters to transformer models with head(s).\n \"\"\"\n\n def __init__(self, config, *args, **kwargs):\n super().__init__(config, *args, **kwargs)\n self._convert_to_flex_head = False\n\n def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:\n \"\"\"\n Iterates over all layers of the model.\n \"\"\"\n if self.base_model is self:\n return super().iter_layers()\n else:\n return self.base_model.iter_layers()\n\n def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):\n \"\"\"\n Adds a new adapter module of the specified type to the model.\n\n Args:\n adapter_name (str): The name of the adapter module to be added.\n config (str or dict, optional): The adapter configuration, can be either:\n\n - the string identifier of a pre-defined configuration dictionary\n - a configuration dictionary specifying the full config\n - if not given, the default configuration for this adapter type will be used\n overwrite_ok (bool, optional):\n Overwrite an adapter with the same name if it exists. By default (False), an exception is thrown.\n set_active (bool, optional):\n Set the adapter to be the active one. 
By default (False), the adapter is added but not activated.\n\n If self.base_model is self, must inherit from a class that implements this method, to preclude infinite\n recursion\n \"\"\"\n if self.base_model is self:\n super().add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)\n else:\n self.base_model.add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)\n\n def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):\n \"\"\"\n Sets the model into mode for training the given adapters. If self.base_model is self, must inherit from a class\n that implements this method, to preclude infinite recursion\n \"\"\"\n if self.base_model is self:\n super().train_adapter(adapter_setup, train_embeddings)\n else:\n self.base_model.train_adapter(adapter_setup, train_embeddings)\n\n def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n \"\"\"\n Sets the model into mode for training of adapter fusion determined by a list of adapter names. If\n self.base_model is self, must inherit from a class that implements this method, to preclude infinite recursion\n \"\"\"\n if self.base_model is self:\n super().train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)\n else:\n self.base_model.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)\n\n def save_head(self, save_directory: str, head_name: str = None):\n loader = PredictionHeadLoader(self)\n loader.save(save_directory, name=head_name)\n\n def load_head(self, save_directory, load_as=None, id2label=None, **kwargs):\n loader = PredictionHeadLoader(self, convert_to_flex_head=self._convert_to_flex_head)\n return loader.load(save_directory, load_as=load_as, id2label=id2label, **kwargs)\n\n def save_adapter(\n self,\n save_directory: str,\n adapter_name: str,\n with_head: bool = True,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))\n super().save_adapter(\n save_directory,\n adapter_name,\n meta_dict=meta_dict,\n custom_weights_loaders=custom_weights_loaders,\n )\n\n def load_adapter(\n self,\n adapter_name_or_path: str,\n config: Union[dict, str] = None,\n version: str = None,\n model_name: str = None,\n load_as: str = None,\n source: str = None,\n with_head: bool = True,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n leave_out: Optional[List[int]] = None,\n id2label=None,\n set_active: bool = False,\n **kwargs\n ) -> str:\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(\n PredictionHeadLoader(\n self,\n error_on_missing=False,\n convert_to_flex_head=self._convert_to_flex_head,\n )\n )\n # Support passing a num_labels for compatibility reasons. 
Convert to label map here.\n num_labels = kwargs.pop(\"num_labels\", None)\n if num_labels is not None:\n id2label = {i: \"LABEL_\" + str(i) for i in range(num_labels)}\n return super().load_adapter(\n adapter_name_or_path,\n config=config,\n version=version,\n model_name=model_name,\n load_as=load_as,\n source=source,\n custom_weights_loaders=custom_weights_loaders,\n leave_out=leave_out,\n id2label=id2label,\n set_active=set_active,\n **kwargs,\n )\n\n def save_all_adapters(\n self,\n save_directory: str,\n with_head: bool = True,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n os.makedirs(save_directory, exist_ok=True)\n for name in self.config.adapters:\n adapter_config = self.config.adapters.get(name)\n h = get_adapter_config_hash(adapter_config)\n save_path = join(save_directory, name)\n if meta_dict:\n meta_dict.update({\"config_id\": h})\n else:\n meta_dict = {\"config_id\": h}\n self.save_adapter(\n save_path,\n name,\n meta_dict=meta_dict,\n with_head=with_head,\n custom_weights_loaders=custom_weights_loaders,\n )\n\n def save_adapter_fusion(\n self,\n save_directory: str,\n adapter_names: Union[Fuse, list, str],\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n with_head: Union[bool, str] = False,\n ):\n \"\"\"\n Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded\n using `load_adapter_fusion()`.\n\n Args:\n save_directory (str): Path to a directory where the AdapterFusion should be saved.\n adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.\n with_head (Union[bool, str]):\n If True, will save a head with the same name as the AdapterFusionLayer. If a string, this will be used\n as the name of the head to be saved.\n\n Raises:\n ValueError: If the given AdapterFusion name is invalid.\n \"\"\"\n super().save_adapter_fusion(save_directory, adapter_names, meta_dict, custom_weights_loaders)\n\n if with_head:\n # Make sure to cover the different options for adapter_names\n if isinstance(with_head, str):\n head_name = with_head\n elif isinstance(adapter_names, Fuse):\n head_name = adapter_names.name\n elif isinstance(adapter_names, list):\n head_name = \",\".join(adapter_names)\n else:\n head_name = adapter_names\n if head_name not in self.heads:\n raise ValueError(\"No head with name {} found\".format(head_name))\n loader = PredictionHeadLoader(self)\n loader.save(save_directory, head_name)\n\n def load_adapter_fusion(\n self,\n adapter_fusion_name_or_path: str,\n load_as: str = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n set_active: bool = False,\n with_head: bool = True,\n **kwargs\n ) -> str:\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))\n super().load_adapter_fusion(adapter_fusion_name_or_path, load_as, custom_weights_loaders, set_active)\n\n def save_all_heads(self, save_directory):\n os.makedirs(save_directory, exist_ok=True)\n for head_name in self.heads:\n save_path = join(save_directory, head_name)\n self.save_head(save_path, head_name)\n\n def get_labels(self):\n return list(self.config.id2label.values())\n\n def get_labels_dict(self):\n return self.config.id2label\n\n def get_adapter(self, name):\n \"\"\"\n If self.base_model is self, must inherit from a class that implements this method, to preclude infinite\n recursion\n \"\"\"\n if self.base_model is self:\n return 
super().get_adapter(name)\n else:\n return self.base_model.get_adapter(name)" }, { "identifier": "Activation_Function_Class", "path": "src/models/transformers/parameter-efficient-finetuning/modeling.py", "snippet": "class Activation_Function_Class(nn.Module):\n \"\"\"\n Implementation of various activation function.\n \"\"\"\n\n def __init__(self, hidden_act):\n super().__init__()\n if hidden_act.lower() == \"leakyrelu\":\n self.f = nn.functional.leaky_relu\n else:\n self.f = get_activation(hidden_act.lower())\n\n def forward(self, x):\n return self.f(x)" } ]
import logging import torch from dataclasses import dataclass from typing import List, Optional, Union from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_outputs import ( ImageClassifierOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...utils import ModelOutput from ..composition import AdapterCompositionBlock, BatchSplit, Parallel, parse_heads_from_composition from ..context import AdapterSetup, ForwardContext from ..model_mixin import ModelWithHeadsAdaptersMixin from ..modeling import Activation_Function_Class
12,507
id2label=None, ): super().__init__(head_name) self.config = { "head_type": "question_answering", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): sequence_output = outputs[0] logits = super().forward(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) start_positions = kwargs.pop("start_positions", None) end_positions = kwargs.pop("end_positions", None) total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if return_dict: if isinstance(outputs, Seq2SeqModelOutput): return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) else: return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: outputs = ( start_logits, end_logits, ) + outputs[1:] if total_loss is not None: outputs = (total_loss,) + outputs return outputs def get_label_names(self): return ["start_positions", "end_positions"] class ImageClassificationHead(PredictionHead): def __init__( self, model, head_name, num_labels=2, layers=2, activation_function="tanh", multilabel=False, id2label=None, use_pooler=False, bias=True, ): super().__init__(head_name) self.config = { "head_type": "image_classification", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "multilabel": multilabel, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, "use_pooler": use_pooler, "bias": bias, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): if cls_output is None: if self.config["use_pooler"]: cls_output = kwargs.pop("pooled_output") else: cls_output = outputs[0][:, 0] logits = super().forward(cls_output) loss = None labels = kwargs.pop("labels", None) if labels is not None: if self.config["num_labels"] == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) elif self.config["multilabel"]: loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config["num_labels"]), labels.view(-1)) if return_dict:
logger = logging.getLogger(__name__) @dataclass class MultiHeadOutput(ModelOutput): head_outputs: List[ModelOutput] = None loss: Optional[torch.FloatTensor] = None @property def logits(self): return torch.vstack([outputs["logits"] for outputs in self.head_outputs]) def __getitem__(self, k): # with number indices the head output at that position is accessed # e.g output[1] is equivalent to output.head_outputs[1] if isinstance(k, int): return self.head_outputs[k] # with strings the attribute in the underlying dict can be adressed # e.g output["loss"] is equivalent to output.loss else: return super().__getitem__(k) def __setitem__(self, k, v): if isinstance(k, int): self.head_outputs[k] = v else: return super().__setitem__(k, v) def __iter__(self): # iterates over the head outputs return iter(self.head_outputs) def __len__(self): return len(self.head_outputs) # Let this class inherit from nn.Sequential to provide iterable access as before class PredictionHead(nn.Sequential): def __init__(self, name): super().__init__() self.config = {} self.name = name def build(self, model): model_config = model.config pred_head = [] dropout_prob = self.config.get("dropout_prob", model_config.hidden_dropout_prob) bias = self.config.get("bias", True) for l_id in range(self.config["layers"]): if dropout_prob > 0: pred_head.append(nn.Dropout(dropout_prob)) if l_id < self.config["layers"] - 1: pred_head.append(nn.Linear(model_config.hidden_size, model_config.hidden_size)) if self.config["activation_function"]: pred_head.append(Activation_Function_Class(self.config["activation_function"])) else: if "num_labels" in self.config: pred_head.append(nn.Linear(model_config.hidden_size, self.config["num_labels"], bias=bias)) elif "num_choices" in self.config: # used for multiple_choice head pred_head.append(nn.Linear(model_config.hidden_size, 1, bias=bias)) else: pred_head.append(nn.Linear(model_config.hidden_size, model_config.hidden_size, bias=bias)) if self.config["activation_function"]: pred_head.append(Activation_Function_Class(self.config["activation_function"])) for i, module in enumerate(pred_head): self.add_module(str(i), module) self.apply(model._init_weights) self.train(model.training) # make sure training mode is consistent def get_output_embeddings(self): return None # override for heads with output embeddings def get_label_names(self): return ["labels"] class ClassificationHead(PredictionHead): def __init__( self, model, head_name, num_labels=2, layers=2, activation_function="tanh", id2label=None, use_pooler=False, bias=True, ): super().__init__(head_name) self.config = { "head_type": "classification", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, "use_pooler": use_pooler, "bias": bias, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): if cls_output is None: if self.config["use_pooler"]: cls_output = kwargs.pop("pooled_output") else: cls_output = outputs[0][:, 0] logits = super().forward(cls_output) loss = None labels = kwargs.pop("labels", None) if labels is not None: if self.config["num_labels"] == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config["num_labels"]), labels.view(-1)) if return_dict: if isinstance(outputs, Seq2SeqModelOutput): return Seq2SeqSequenceClassifierOutput( 
loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) else: return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: outputs = (logits,) + outputs[1:] if labels is not None: outputs = (loss,) + outputs return outputs class MultiLabelClassificationHead(PredictionHead): def __init__( self, model, head_name, num_labels=2, layers=2, activation_function="tanh", id2label=None, use_pooler=False, bias=True, ): super().__init__(head_name) self.config = { "head_type": "multilabel_classification", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, "use_pooler": use_pooler, "bias": bias, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): if cls_output is None: if self.config["use_pooler"]: cls_output = kwargs.pop("pooled_output") else: cls_output = outputs[0][:, 0] logits = super().forward(cls_output) loss = None labels = kwargs.pop("labels", None) if labels is not None: loss_fct = BCEWithLogitsLoss() if labels.dtype != torch.float32: labels = labels.float() loss = loss_fct(logits, labels) if return_dict: if isinstance(outputs, Seq2SeqModelOutput): return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) else: return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: outputs = (logits,) + outputs[1:] if labels is not None: outputs = (loss,) + outputs return outputs class MultipleChoiceHead(PredictionHead): def __init__( self, model, head_name, num_choices=2, layers=2, activation_function="tanh", id2label=None, use_pooler=False, ): super().__init__(head_name) self.config = { "head_type": "multiple_choice", "num_choices": num_choices, "layers": layers, "activation_function": activation_function, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, "use_pooler": use_pooler, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=None, **kwargs): if cls_output is None: if self.config["use_pooler"]: cls_output = kwargs.pop("pooled_output") else: cls_output = outputs[0][:, 0] logits = super().forward(cls_output) logits = logits.view(-1, self.config["num_choices"]) loss = None labels = kwargs.pop("labels", None) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits, labels) if return_dict: return MultipleChoiceModelOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: outputs = (logits,) + outputs[1:] if labels is not None: outputs = (loss,) + outputs return outputs class TaggingHead(PredictionHead): def __init__( self, model, head_name, 
num_labels=2, layers=1, activation_function="tanh", id2label=None, ): super().__init__(head_name) self.config = { "head_type": "tagging", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): logits = super().forward(outputs[0]) loss = None labels = kwargs.pop("labels", None) if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.config["num_labels"]) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.config["num_labels"]), labels.view(-1)) if return_dict: return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: outputs = (logits,) + outputs[1:] if labels is not None: outputs = (loss,) + outputs return outputs class QuestionAnsweringHead(PredictionHead): def __init__( self, model, head_name, num_labels=2, layers=1, activation_function="tanh", id2label=None, ): super().__init__(head_name) self.config = { "head_type": "question_answering", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): sequence_output = outputs[0] logits = super().forward(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) start_positions = kwargs.pop("start_positions", None) end_positions = kwargs.pop("end_positions", None) total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if return_dict: if isinstance(outputs, Seq2SeqModelOutput): return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) else: return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: outputs = ( start_logits, end_logits, ) + outputs[1:] if total_loss is not None: outputs = (total_loss,) + outputs return outputs def 
get_label_names(self): return ["start_positions", "end_positions"] class ImageClassificationHead(PredictionHead): def __init__( self, model, head_name, num_labels=2, layers=2, activation_function="tanh", multilabel=False, id2label=None, use_pooler=False, bias=True, ): super().__init__(head_name) self.config = { "head_type": "image_classification", "num_labels": num_labels, "layers": layers, "activation_function": activation_function, "multilabel": multilabel, "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None, "use_pooler": use_pooler, "bias": bias, } self.build(model) def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs): if cls_output is None: if self.config["use_pooler"]: cls_output = kwargs.pop("pooled_output") else: cls_output = outputs[0][:, 0] logits = super().forward(cls_output) loss = None labels = kwargs.pop("labels", None) if labels is not None: if self.config["num_labels"] == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) elif self.config["multilabel"]: loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config["num_labels"]), labels.view(-1)) if return_dict:
return ImageClassifierOutput(
0
2023-10-18 18:05:54+00:00
16k
nchen909/Pass-Tuning
models_list/bitfit/modeling_auto.py
[ { "identifier": "PLBartForConditionalGeneration", "path": "models_list/bitfit/modeling_plbart.py", "snippet": "class PLBartForConditionalGeneration(PLBartPreTrainedModel):\n base_model_prefix = \"model\"\n _keys_to_ignore_on_load_missing = [\n r\"final_logits_bias\",\n r\"encoder.version\",\n r\"decoder.version\",\n r\"lm_head.weight\",\n ]\n\n def __init__(self, config: PLBartConfig):\n super().__init__(config)\n self.model = PLBartModel(config)\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)\n\n self.init_weights()\n\n def get_encoder(self):\n return self.model.get_encoder()\n\n def get_decoder(self):\n return self.model.get_decoder()\n\n def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:\n new_embeddings = super().resize_token_embeddings(new_num_tokens)\n self._resize_final_logits_bias(new_num_tokens)\n return new_embeddings\n\n def _resize_final_logits_bias(self, new_num_tokens: int) -> None:\n old_num_tokens = self.final_logits_bias.shape[-1]\n if new_num_tokens <= old_num_tokens:\n new_bias = self.final_logits_bias[:, :new_num_tokens]\n else:\n extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)\n new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)\n self.register_buffer(\"final_logits_bias\", new_bias)\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n @add_end_docstrings(PLBART_GENERATION_EXAMPLE)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.LongTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds=None,\n labels: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n **kwargs,\n ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is not None:\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n def prepare_inputs_for_generation(\n self,\n decoder_input_ids: torch.LongTensor,\n past: Optional[List[torch.FloatTensor]] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.Tensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n **kwargs # TODO: Check if this is needed. It is unused?\n ) -> Dict[str, Any]:\n # cut decoder_input_ids if past is used\n if past is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n\n return {\n \"input_ids\": None, # encoder_outputs is defined. 
input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"head_mask\": head_mask,\n \"decoder_head_mask\": decoder_head_mask,\n \"cross_attn_head_mask\": cross_attn_head_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\n return shift_tokens_right(labels, self.config.pad_token_id)\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n )\n return reordered_past" }, { "identifier": "PLBartModel", "path": "models_list/bitfit/modeling_plbart.py", "snippet": "class PLBartModel(PLBartPreTrainedModel):\n def __init__(self, config: PLBartConfig):\n super().__init__(config)\n\n padding_idx, vocab_size = config.pad_token_id, config.vocab_size\n self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)\n\n self.encoder = PLBartEncoder(config, self.shared)\n self.decoder = PLBartDecoder(config, self.shared)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, value):\n self.shared = value\n self.encoder.embed_tokens = self.shared\n self.decoder.embed_tokens = self.shared\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=Seq2SeqModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.LongTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds=None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # different to other models, PLBart automatically creates decoder_input_ids from\n # input_ids if no decoder_input_ids are provided\n if decoder_input_ids is None and decoder_inputs_embeds is None:\n decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)\n\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n 
output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=encoder_outputs[0],\n encoder_attention_mask=attention_mask,\n head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )" }, { "identifier": "T5ForConditionalGeneration", "path": "models_list/bitfit/modeling_t5.py", "snippet": "class T5ForConditionalGeneration(T5PreTrainedModel):\r\n _keys_to_ignore_on_load_missing = [\r\n r\"encoder\\.embed_tokens\\.weight\",\r\n r\"decoder\\.embed_tokens\\.weight\",\r\n r\"lm_head\\.weight\",\r\n ]\r\n _keys_to_ignore_on_load_unexpected = [\r\n r\"decoder\\.block\\.0\\.layer\\.1\\.EncDecAttention\\.relative_attention_bias\\.weight\",\r\n ]\r\n\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.model_dim = config.d_model\r\n\r\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\r\n\r\n encoder_config = copy.deepcopy(config)\r\n encoder_config.is_decoder = False\r\n encoder_config.use_cache = False\r\n encoder_config.is_encoder_decoder = False\r\n self.encoder = T5Stack(encoder_config, self.shared)\r\n\r\n decoder_config = copy.deepcopy(config)\r\n decoder_config.is_decoder = True\r\n decoder_config.is_encoder_decoder = False\r\n decoder_config.num_layers = config.num_decoder_layers\r\n self.decoder = T5Stack(decoder_config, self.shared)\r\n\r\n self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)\r\n\r\n self.init_weights()\r\n\r\n # Model parallel\r\n self.model_parallel = False\r\n self.device_map = None\r\n\r\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\r\n def parallelize(self, device_map=None):\r\n self.device_map = (\r\n get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))\r\n if device_map is None\r\n else device_map\r\n )\r\n assert_device_map(self.device_map, len(self.encoder.block))\r\n self.encoder.parallelize(self.device_map)\r\n self.decoder.parallelize(self.device_map)\r\n self.lm_head = self.lm_head.to(self.decoder.first_device)\r\n self.model_parallel = True\r\n\r\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\r\n def deparallelize(self):\r\n self.encoder.deparallelize()\r\n self.decoder.deparallelize()\r\n self.encoder = 
self.encoder.to(\"cpu\")\r\n self.decoder = self.decoder.to(\"cpu\")\r\n self.lm_head = self.lm_head.to(\"cpu\")\r\n self.model_parallel = False\r\n self.device_map = None\r\n torch.cuda.empty_cache()\r\n\r\n def get_input_embeddings(self):\r\n return self.shared\r\n\r\n def set_input_embeddings(self, new_embeddings):\r\n self.shared = new_embeddings\r\n self.encoder.set_input_embeddings(new_embeddings)\r\n self.decoder.set_input_embeddings(new_embeddings)\r\n\r\n def set_output_embeddings(self, new_embeddings):\r\n self.lm_head = new_embeddings\r\n\r\n def get_output_embeddings(self):\r\n return self.lm_head\r\n\r\n def get_encoder(self):\r\n return self.encoder\r\n\r\n def get_decoder(self):\r\n return self.decoder\r\n\r\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\r\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n decoder_input_ids=None,\r\n decoder_attention_mask=None,\r\n head_mask=None,\r\n decoder_head_mask=None,\r\n cross_attn_head_mask=None,\r\n encoder_outputs=None,\r\n past_key_values=None,\r\n inputs_embeds=None,\r\n decoder_inputs_embeds=None,\r\n labels=None,\r\n use_cache=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n past_prompt=None, # modified\r\n ):\r\n r\"\"\"\r\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ...,\r\n config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for\r\n labels in ``[0, ..., config.vocab_size]``\r\n\r\n Returns:\r\n\r\n Examples::\r\n\r\n >>> from transformers import T5Tokenizer, T5ForConditionalGeneration\r\n\r\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\r\n >>> model = T5ForConditionalGeneration.from_pretrained('t5-small')\r\n\r\n >>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids\r\n >>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids\r\n >>> outputs = model(input_ids=input_ids, labels=labels)\r\n >>> loss = outputs.loss\r\n >>> logits = outputs.logits\r\n\r\n >>> input_ids = tokenizer(\"summarize: studies have shown that owning a dog is good for you \", return_tensors=\"pt\").input_ids # Batch size 1\r\n >>> outputs = model.generate(input_ids)\r\n \"\"\"\r\n use_cache = use_cache if use_cache is not None else self.config.use_cache\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask\r\n if head_mask is not None and decoder_head_mask is None:\r\n if self.config.num_layers == self.config.num_decoder_layers:\r\n warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)\r\n decoder_head_mask = head_mask\r\n\r\n # Encode if needed (training, first prediction pass)\r\n if encoder_outputs is None:\r\n # Convert encoder inputs in embeddings if needed\r\n encoder_outputs = self.encoder(\r\n input_ids=input_ids,\r\n attention_mask=attention_mask,\r\n inputs_embeds=inputs_embeds,\r\n head_mask=head_mask,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n past_prompt=past_prompt, # modified\r\n )\r\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\r\n 
encoder_outputs = BaseModelOutput(\r\n last_hidden_state=encoder_outputs[0],\r\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\r\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\r\n )\r\n\r\n hidden_states = encoder_outputs[0]\r\n\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n\r\n if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:\r\n # get decoder inputs from shifting lm labels to the right\r\n decoder_input_ids = self._shift_right(labels)\r\n\r\n # If decoding with past key value states, only the last tokens\r\n # should be given as an input\r\n if past_key_values is not None:\r\n assert labels is None, \"Decoder should not use cached key value states when training.\"\r\n if decoder_input_ids is not None:\r\n decoder_input_ids = decoder_input_ids[:, -1:]\r\n if decoder_inputs_embeds is not None:\r\n decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]\r\n\r\n # Set device for model parallelism\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n hidden_states = hidden_states.to(self.decoder.first_device)\r\n if decoder_input_ids is not None:\r\n decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)\r\n if attention_mask is not None:\r\n attention_mask = attention_mask.to(self.decoder.first_device)\r\n if decoder_attention_mask is not None:\r\n decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)\r\n\r\n # Decode\r\n decoder_outputs = self.decoder(\r\n input_ids=decoder_input_ids,\r\n attention_mask=decoder_attention_mask,\r\n inputs_embeds=decoder_inputs_embeds,\r\n past_key_values=past_key_values,\r\n encoder_hidden_states=hidden_states,\r\n encoder_attention_mask=attention_mask,\r\n head_mask=decoder_head_mask,\r\n cross_attn_head_mask=cross_attn_head_mask,\r\n use_cache=use_cache,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n past_prompt=past_prompt, # modified\r\n )\r\n\r\n sequence_output = decoder_outputs[0]\r\n\r\n # Set device for model parallelism\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.encoder.first_device)\r\n self.lm_head = self.lm_head.to(self.encoder.first_device)\r\n sequence_output = sequence_output.to(self.lm_head.weight.device)\r\n\r\n if self.config.tie_word_embeddings:\r\n # Rescale output before projecting on vocab\r\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586\r\n sequence_output = sequence_output * (self.model_dim ** -0.5)\r\n\r\n lm_logits = self.lm_head(sequence_output)\r\n\r\n loss = None\r\n if labels is not None:\r\n loss_fct = CrossEntropyLoss(ignore_index=-100)\r\n loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))\r\n # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666\r\n\r\n if not return_dict:\r\n output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs\r\n return ((loss,) + output) if loss is not None else output\r\n\r\n return Seq2SeqLMOutput(\r\n loss=loss,\r\n logits=lm_logits,\r\n past_key_values=decoder_outputs.past_key_values,\r\n decoder_hidden_states=decoder_outputs.hidden_states,\r\n decoder_attentions=decoder_outputs.attentions,\r\n cross_attentions=decoder_outputs.cross_attentions,\r\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\r\n 
encoder_hidden_states=encoder_outputs.hidden_states,\r\n encoder_attentions=encoder_outputs.attentions,\r\n )\r\n\r\n def prepare_inputs_for_generation(\r\n self,\r\n input_ids,\r\n past=None,\r\n attention_mask=None,\r\n head_mask=None,\r\n decoder_head_mask=None,\r\n cross_attn_head_mask=None,\r\n use_cache=None,\r\n encoder_outputs=None,\r\n **kwargs\r\n ):\r\n\r\n # cut decoder_input_ids if past is used\r\n if past is not None:\r\n input_ids = input_ids[:, -1:]\r\n\r\n return {\r\n \"decoder_input_ids\": input_ids,\r\n \"past_key_values\": past,\r\n \"encoder_outputs\": encoder_outputs,\r\n \"attention_mask\": attention_mask,\r\n \"head_mask\": head_mask,\r\n \"decoder_head_mask\": decoder_head_mask,\r\n \"cross_attn_head_mask\": cross_attn_head_mask,\r\n \"use_cache\": use_cache,\r\n # \"past_prompt\": kwargs['past_prompt'], # modified\r\n }\r\n\r\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\r\n return self._shift_right(labels)\r\n\r\n def _reorder_cache(self, past, beam_idx):\r\n # if decoder past is not included in output\r\n # speedy decoding is disabled and no need to reorder\r\n if past is None:\r\n logger.warning(\"You might want to consider setting `use_cache=True` to speed up decoding\")\r\n return past\r\n\r\n reordered_decoder_past = ()\r\n for layer_past_states in past:\r\n # get the correct batch idx from layer past batch dim\r\n # batch dim of `past` is at 2nd position\r\n reordered_layer_past_states = ()\r\n for layer_past_state in layer_past_states:\r\n # need to set correct `past` for each of the four key / value states\r\n reordered_layer_past_states = reordered_layer_past_states + (\r\n layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),\r\n )\r\n\r\n assert reordered_layer_past_states[0].shape == layer_past_states[0].shape\r\n assert len(reordered_layer_past_states) == len(layer_past_states)\r\n\r\n reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)\r\n return reordered_decoder_past\r" }, { "identifier": "T5Model", "path": "models_list/bitfit/modeling_t5.py", "snippet": "class T5Model(T5PreTrainedModel):\r\n _keys_to_ignore_on_load_missing = [\r\n r\"encoder\\.embed_tokens\\.weight\",\r\n r\"decoder\\.embed_tokens\\.weight\",\r\n ]\r\n _keys_to_ignore_on_load_unexpected = [\r\n r\"decoder\\.block\\.0\\.layer\\.1\\.EncDecAttention\\.relative_attention_bias\\.weight\",\r\n ]\r\n\r\n def __init__(self, config: T5Config):\r\n super().__init__(config)\r\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\r\n\r\n encoder_config = copy.deepcopy(config)\r\n encoder_config.is_decoder = False\r\n encoder_config.use_cache = False\r\n encoder_config.is_encoder_decoder = False\r\n self.encoder = T5Stack(encoder_config, self.shared)\r\n\r\n decoder_config = copy.deepcopy(config)\r\n decoder_config.is_decoder = True\r\n decoder_config.is_encoder_decoder = False\r\n decoder_config.num_layers = config.num_decoder_layers\r\n self.decoder = T5Stack(decoder_config, self.shared)\r\n\r\n self.init_weights()\r\n\r\n # Model parallel\r\n self.model_parallel = False\r\n self.device_map = None\r\n\r\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\r\n def parallelize(self, device_map=None):\r\n self.device_map = (\r\n get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))\r\n if device_map is None\r\n else device_map\r\n )\r\n assert_device_map(self.device_map, len(self.encoder.block))\r\n self.encoder.parallelize(self.device_map)\r\n self.decoder.parallelize(self.device_map)\r\n 
self.model_parallel = True\r\n\r\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\r\n def deparallelize(self):\r\n self.encoder.deparallelize()\r\n self.decoder.deparallelize()\r\n self.encoder = self.encoder.to(\"cpu\")\r\n self.decoder = self.decoder.to(\"cpu\")\r\n self.model_parallel = False\r\n self.device_map = None\r\n torch.cuda.empty_cache()\r\n\r\n def get_input_embeddings(self):\r\n return self.shared\r\n\r\n def set_input_embeddings(self, new_embeddings):\r\n self.shared = new_embeddings\r\n self.encoder.set_input_embeddings(new_embeddings)\r\n self.decoder.set_input_embeddings(new_embeddings)\r\n\r\n def get_encoder(self):\r\n return self.encoder\r\n\r\n def get_decoder(self):\r\n return self.decoder\r\n\r\n def _prune_heads(self, heads_to_prune):\r\n \"\"\"\r\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\r\n class PreTrainedModel\r\n \"\"\"\r\n for layer, heads in heads_to_prune.items():\r\n self.encoder.layer[layer].attention.prune_heads(heads)\r\n\r\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\r\n @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n decoder_input_ids=None,\r\n decoder_attention_mask=None,\r\n head_mask=None,\r\n decoder_head_mask=None,\r\n cross_attn_head_mask=None,\r\n encoder_outputs=None,\r\n past_key_values=None,\r\n inputs_embeds=None,\r\n decoder_inputs_embeds=None,\r\n use_cache=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n ):\r\n r\"\"\"\r\n Returns:\r\n\r\n Example::\r\n\r\n >>> from transformers import T5Tokenizer, T5Model\r\n\r\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\r\n >>> model = T5Model.from_pretrained('t5-small')\r\n\r\n >>> input_ids = tokenizer(\"Studies have been shown that owning a dog is good for you\", return_tensors=\"pt\").input_ids # Batch size 1\r\n >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size 1\r\n >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)\r\n\r\n >>> last_hidden_states = outputs.last_hidden_state\r\n \"\"\"\r\n use_cache = use_cache if use_cache is not None else self.config.use_cache\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask\r\n if head_mask is not None and decoder_head_mask is None:\r\n if self.config.num_layers == self.config.num_decoder_layers:\r\n warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)\r\n decoder_head_mask = head_mask\r\n\r\n # Encode if needed (training, first prediction pass)\r\n if encoder_outputs is None:\r\n encoder_outputs = self.encoder(\r\n input_ids=input_ids,\r\n attention_mask=attention_mask,\r\n inputs_embeds=inputs_embeds,\r\n head_mask=head_mask,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\r\n encoder_outputs = BaseModelOutput(\r\n last_hidden_state=encoder_outputs[0],\r\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\r\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\r\n )\r\n\r\n hidden_states = encoder_outputs[0]\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n # Set 
device for model parallelism\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n hidden_states = hidden_states.to(self.decoder.first_device)\r\n if decoder_input_ids is not None:\r\n decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)\r\n if attention_mask is not None:\r\n attention_mask = attention_mask.to(self.decoder.first_device)\r\n if decoder_attention_mask is not None:\r\n decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)\r\n\r\n # Decode\r\n decoder_outputs = self.decoder(\r\n input_ids=decoder_input_ids,\r\n attention_mask=decoder_attention_mask,\r\n inputs_embeds=decoder_inputs_embeds,\r\n past_key_values=past_key_values,\r\n encoder_hidden_states=hidden_states,\r\n encoder_attention_mask=attention_mask,\r\n head_mask=decoder_head_mask,\r\n cross_attn_head_mask=cross_attn_head_mask,\r\n use_cache=use_cache,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n\r\n if not return_dict:\r\n return decoder_outputs + encoder_outputs\r\n\r\n return Seq2SeqModelOutput(\r\n last_hidden_state=decoder_outputs.last_hidden_state,\r\n past_key_values=decoder_outputs.past_key_values,\r\n decoder_hidden_states=decoder_outputs.hidden_states,\r\n decoder_attentions=decoder_outputs.attentions,\r\n cross_attentions=decoder_outputs.cross_attentions,\r\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\r\n encoder_hidden_states=encoder_outputs.hidden_states,\r\n encoder_attentions=encoder_outputs.attentions,\r\n )\r" } ]
import warnings from collections import OrderedDict from transformers.utils import logging from transformers.models.albert.modeling_albert import ( AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from .modeling_plbart import ( PLBartForConditionalGeneration, PLBartModel, ) from transformers.models.bart.modeling_bart import ( BartForCausalLM, BartForQuestionAnswering, BartForSequenceClassification, ) from transformers.models.bert.modeling_bert import ( BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLMHeadModel, BertModel, ) from transformers.models.bert_generation.modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder from transformers.models.big_bird.modeling_big_bird import ( BigBirdForCausalLM, BigBirdForMaskedLM, BigBirdForMultipleChoice, BigBirdForPreTraining, BigBirdForQuestionAnswering, BigBirdForSequenceClassification, BigBirdForTokenClassification, BigBirdModel, ) from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import ( BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, ) from transformers.models.blenderbot.modeling_blenderbot import BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel from transformers.models.blenderbot_small.modeling_blenderbot_small import ( BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, ) from transformers.models.camembert.modeling_camembert import ( CamembertForCausalLM, CamembertForMaskedLM, CamembertForMultipleChoice, CamembertForQuestionAnswering, CamembertForSequenceClassification, CamembertForTokenClassification, CamembertModel, ) from transformers.models.canine.modeling_canine import ( CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineModel, ) from transformers.models.clip.modeling_clip import CLIPModel from transformers.models.convbert.modeling_convbert import ( ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertModel, ) from transformers.models.ctrl.modeling_ctrl import CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel from transformers.models.deberta.modeling_deberta import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta_v2.modeling_deberta_v2 import ( DebertaV2ForMaskedLM, DebertaV2ForQuestionAnswering, DebertaV2ForSequenceClassification, DebertaV2ForTokenClassification, DebertaV2Model, ) from transformers.models.deit.modeling_deit import DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTModel from transformers.models.detr.modeling_detr import DetrForObjectDetection, DetrModel from transformers.models.distilbert.modeling_distilbert import ( DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) from transformers.models.dpr.modeling_dpr import DPRQuestionEncoder from transformers.models.electra.modeling_electra import ( ElectraForMaskedLM, 
ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ) from transformers.models.encoder_decoder.modeling_encoder_decoder import EncoderDecoderModel from transformers.models.flaubert.modeling_flaubert import ( FlaubertForMultipleChoice, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.fsmt.modeling_fsmt import FSMTForConditionalGeneration, FSMTModel from transformers.models.funnel.modeling_funnel import ( FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, ) from transformers.models.gpt2.modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM, GPTNeoForSequenceClassification, GPTNeoModel from transformers.models.hubert.modeling_hubert import HubertModel from transformers.models.ibert.modeling_ibert import ( IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, ) from transformers.models.layoutlm.modeling_layoutlm import ( LayoutLMForMaskedLM, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMModel, ) from transformers.models.led.modeling_led import ( LEDForConditionalGeneration, LEDForQuestionAnswering, LEDForSequenceClassification, LEDModel, ) from transformers.models.longformer.modeling_longformer import ( LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, ) from transformers.models.luke.modeling_luke import LukeModel from transformers.models.lxmert.modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel from transformers.models.m2m_100.modeling_m2m_100 import M2M100ForConditionalGeneration, M2M100Model from transformers.models.marian.modeling_marian import MarianForCausalLM, MarianModel, MarianMTModel from transformers.models.mbart.modeling_mbart import ( MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, ) from transformers.models.megatron_bert.modeling_megatron_bert import ( MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) from transformers.models.mobilebert.modeling_mobilebert import ( MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) from transformers.models.mpnet.modeling_mpnet import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) from transformers.models.mt5.modeling_mt5 import MT5ForConditionalGeneration, MT5Model from transformers.models.openai.modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel from transformers.models.pegasus.modeling_pegasus import 
PegasusForCausalLM, PegasusForConditionalGeneration, PegasusModel from transformers.models.prophetnet.modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel from transformers.models.rag.modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function RagModel, RagSequenceForGeneration, RagTokenForGeneration, ) from transformers.models.reformer.modeling_reformer import ( ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerModel, ReformerModelWithLMHead, ) from transformers.models.retribert.modeling_retribert import RetriBertModel from transformers.models.roberta.modeling_roberta import ( RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, ) from transformers.models.roformer.modeling_roformer import ( RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerModel, ) from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextForConditionalGeneration, Speech2TextModel from transformers.models.squeezebert.modeling_squeezebert import ( SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) from .modeling_t5 import T5ForConditionalGeneration, T5Model from transformers.models.tapas.modeling_tapas import ( TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, ) from transformers.models.transfo_xl.modeling_transfo_xl import TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel from transformers.models.visual_bert.modeling_visual_bert import VisualBertForPreTraining, VisualBertModel from transformers.models.vit.modeling_vit import ViTForImageClassification, ViTModel from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForMaskedLM, Wav2Vec2ForPreTraining, Wav2Vec2Model from transformers.models.xlm.modeling_xlm import ( XLMForMultipleChoice, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm_prophetnet.modeling_xlm_prophetnet import ( XLMProphetNetForCausalLM, XLMProphetNetForConditionalGeneration, XLMProphetNetModel, ) from transformers.models.xlm_roberta.modeling_xlm_roberta import ( XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, ) from transformers.models.xlnet.modeling_xlnet import ( XLNetForMultipleChoice, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, ) from transformers.models.auto.auto_factory import _BaseAutoModelClass, auto_class_update from transformers.models.auto.configuration_auto import ( AlbertConfig, PLBartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BlenderbotConfig, BlenderbotSmallConfig, CamembertConfig, CanineConfig, CLIPConfig, ConvBertConfig, CTRLConfig, DebertaConfig, DebertaV2Config, DeiTConfig, DetrConfig, DistilBertConfig, DPRConfig, ElectraConfig, EncoderDecoderConfig, FlaubertConfig, FSMTConfig, FunnelConfig, GPT2Config, GPTNeoConfig, HubertConfig, IBertConfig, 
LayoutLMConfig, LEDConfig, LongformerConfig, LukeConfig, LxmertConfig, M2M100Config, MarianConfig, MBartConfig, MegatronBertConfig, MobileBertConfig, MPNetConfig, MT5Config, OpenAIGPTConfig, PegasusConfig, ProphetNetConfig, ReformerConfig, RetriBertConfig, RobertaConfig, RoFormerConfig, Speech2TextConfig, SqueezeBertConfig, T5Config, TapasConfig, TransfoXLConfig, VisualBertConfig, ViTConfig, Wav2Vec2Config, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLNetConfig, )
11,353
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Auto Model class. """ # Add modeling imports here # # Instead of loading the BART from the transformers==4.9.1, we choose to load from our own prefix-tuning version. # Instead of loading the T5 from the transformers==4.9.1, we choose to load from our prefix-tuning version. logger = logging.get_logger(__name__) MODEL_MAPPING = OrderedDict( [ # Base model mapping (VisualBertConfig, VisualBertModel), (CanineConfig, CanineModel), (RoFormerConfig, RoFormerModel), (CLIPConfig, CLIPModel), (BigBirdPegasusConfig, BigBirdPegasusModel), (DeiTConfig, DeiTModel), (LukeConfig, LukeModel), (DetrConfig, DetrModel), (GPTNeoConfig, GPTNeoModel), (BigBirdConfig, BigBirdModel), (Speech2TextConfig, Speech2TextModel), (ViTConfig, ViTModel), (Wav2Vec2Config, Wav2Vec2Model), (HubertConfig, HubertModel), (M2M100Config, M2M100Model), (ConvBertConfig, ConvBertModel), (LEDConfig, LEDModel), (BlenderbotSmallConfig, BlenderbotSmallModel), (RetriBertConfig, RetriBertModel), (MT5Config, MT5Model), (T5Config, T5Model), (PegasusConfig, PegasusModel), (MarianConfig, MarianMTModel), (MBartConfig, MBartModel), (BlenderbotConfig, BlenderbotModel), (DistilBertConfig, DistilBertModel), (AlbertConfig, AlbertModel), (CamembertConfig, CamembertModel), (XLMRobertaConfig, XLMRobertaModel), (PLBartConfig, PLBartModel), (LongformerConfig, LongformerModel), (RobertaConfig, RobertaModel), (LayoutLMConfig, LayoutLMModel), (SqueezeBertConfig, SqueezeBertModel), (BertConfig, BertModel), (OpenAIGPTConfig, OpenAIGPTModel), (GPT2Config, GPT2Model), (MegatronBertConfig, MegatronBertModel), (MobileBertConfig, MobileBertModel), (TransfoXLConfig, TransfoXLModel), (XLNetConfig, XLNetModel), (FlaubertConfig, FlaubertModel), (FSMTConfig, FSMTModel), (XLMConfig, XLMModel), (CTRLConfig, CTRLModel), (ElectraConfig, ElectraModel), (ReformerConfig, ReformerModel), (FunnelConfig, (FunnelModel, FunnelBaseModel)), (LxmertConfig, LxmertModel), (BertGenerationConfig, BertGenerationEncoder), (DebertaConfig, DebertaModel), (DebertaV2Config, DebertaV2Model), (DPRConfig, DPRQuestionEncoder), (XLMProphetNetConfig, XLMProphetNetModel), (ProphetNetConfig, ProphetNetModel), (MPNetConfig, MPNetModel), (TapasConfig, TapasModel), (MarianConfig, MarianModel), (IBertConfig, IBertModel), ] ) MODEL_FOR_PRETRAINING_MAPPING = OrderedDict( [ # Model for pre-training mapping (VisualBertConfig, VisualBertForPreTraining), (LayoutLMConfig, LayoutLMForMaskedLM), (RetriBertConfig, RetriBertModel), (T5Config, T5ForConditionalGeneration), (DistilBertConfig, DistilBertForMaskedLM), (AlbertConfig, AlbertForPreTraining), (CamembertConfig, CamembertForMaskedLM), (XLMRobertaConfig, XLMRobertaForMaskedLM),
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Auto Model class. """ # Add modeling imports here # # Instead of loading the BART from the transformers==4.9.1, we choose to load from our own prefix-tuning version. # Instead of loading the T5 from the transformers==4.9.1, we choose to load from our prefix-tuning version. logger = logging.get_logger(__name__) MODEL_MAPPING = OrderedDict( [ # Base model mapping (VisualBertConfig, VisualBertModel), (CanineConfig, CanineModel), (RoFormerConfig, RoFormerModel), (CLIPConfig, CLIPModel), (BigBirdPegasusConfig, BigBirdPegasusModel), (DeiTConfig, DeiTModel), (LukeConfig, LukeModel), (DetrConfig, DetrModel), (GPTNeoConfig, GPTNeoModel), (BigBirdConfig, BigBirdModel), (Speech2TextConfig, Speech2TextModel), (ViTConfig, ViTModel), (Wav2Vec2Config, Wav2Vec2Model), (HubertConfig, HubertModel), (M2M100Config, M2M100Model), (ConvBertConfig, ConvBertModel), (LEDConfig, LEDModel), (BlenderbotSmallConfig, BlenderbotSmallModel), (RetriBertConfig, RetriBertModel), (MT5Config, MT5Model), (T5Config, T5Model), (PegasusConfig, PegasusModel), (MarianConfig, MarianMTModel), (MBartConfig, MBartModel), (BlenderbotConfig, BlenderbotModel), (DistilBertConfig, DistilBertModel), (AlbertConfig, AlbertModel), (CamembertConfig, CamembertModel), (XLMRobertaConfig, XLMRobertaModel), (PLBartConfig, PLBartModel), (LongformerConfig, LongformerModel), (RobertaConfig, RobertaModel), (LayoutLMConfig, LayoutLMModel), (SqueezeBertConfig, SqueezeBertModel), (BertConfig, BertModel), (OpenAIGPTConfig, OpenAIGPTModel), (GPT2Config, GPT2Model), (MegatronBertConfig, MegatronBertModel), (MobileBertConfig, MobileBertModel), (TransfoXLConfig, TransfoXLModel), (XLNetConfig, XLNetModel), (FlaubertConfig, FlaubertModel), (FSMTConfig, FSMTModel), (XLMConfig, XLMModel), (CTRLConfig, CTRLModel), (ElectraConfig, ElectraModel), (ReformerConfig, ReformerModel), (FunnelConfig, (FunnelModel, FunnelBaseModel)), (LxmertConfig, LxmertModel), (BertGenerationConfig, BertGenerationEncoder), (DebertaConfig, DebertaModel), (DebertaV2Config, DebertaV2Model), (DPRConfig, DPRQuestionEncoder), (XLMProphetNetConfig, XLMProphetNetModel), (ProphetNetConfig, ProphetNetModel), (MPNetConfig, MPNetModel), (TapasConfig, TapasModel), (MarianConfig, MarianModel), (IBertConfig, IBertModel), ] ) MODEL_FOR_PRETRAINING_MAPPING = OrderedDict( [ # Model for pre-training mapping (VisualBertConfig, VisualBertForPreTraining), (LayoutLMConfig, LayoutLMForMaskedLM), (RetriBertConfig, RetriBertModel), (T5Config, T5ForConditionalGeneration), (DistilBertConfig, DistilBertForMaskedLM), (AlbertConfig, AlbertForPreTraining), (CamembertConfig, CamembertForMaskedLM), (XLMRobertaConfig, XLMRobertaForMaskedLM),
(PLBartConfig, PLBartForConditionalGeneration),
0
2023-10-20 09:24:44+00:00
16k
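The cropped module in the record above builds MODEL_MAPPING as an OrderedDict keyed by config classes, and the auto classes resolve a concrete model class by looking up the type of a given config. A minimal, self-contained sketch of that dispatch pattern is below; the stand-in classes and the model_from_config helper are illustrative only, not the transformers API.

from collections import OrderedDict

# Illustrative stand-in config/model classes (not the real transformers classes).
class BertConfig: ...
class BertModel:
    def __init__(self, config): self.config = config

class GPT2Config: ...
class GPT2Model:
    def __init__(self, config): self.config = config

# Same idea as MODEL_MAPPING above: config class -> model class.
MODEL_MAPPING = OrderedDict([
    (BertConfig, BertModel),
    (GPT2Config, GPT2Model),
])

def model_from_config(config):
    # Resolve the model class registered for this config type.
    model_cls = MODEL_MAPPING.get(type(config))
    if model_cls is None:
        raise ValueError(f"No model registered for {type(config).__name__}")
    return model_cls(config)

model = model_from_config(GPT2Config())
assert isinstance(model, GPT2Model)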
JoaoPedro9674/django-ledger
django_ledger/io/io_mixin.py
[ { "identifier": "settings", "path": "django_ledger/settings.py", "snippet": " DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = True\n DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = False\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = True\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = False\nDJANGO_LEDGER_USE_CLOSING_ENTRIES = getattr(settings, 'DJANGO_LEDGER_USE_CLOSING_ENTRIES', False)\nDJANGO_LEDGER_DEFAULT_CLOSING_ENTRY_CACHE_TIMEOUT = getattr(settings,\n 'DJANGO_LEDGER_DEFAULT_CLOSING_ENTRY_CACHE_TIMEOUT', 3600)\nDJANGO_LEDGER_LOGIN_URL = getattr(settings, 'DJANGO_LEDGER_LOGIN_URL', settings.LOGIN_URL)\nDJANGO_LEDGER_BILL_NUMBER_LENGTH = getattr(settings, 'DJANGO_LEDGER_BILL_NUMBER_LENGTH', 10)\nDJANGO_LEDGER_INVOICE_NUMBER_LENGTH = getattr(settings, 'DJANGO_LEDGER_INVOICE_NUMBER_LENGTH', 10)\nDJANGO_LEDGER_FORM_INPUT_CLASSES = getattr(settings, 'DJANGO_LEDGER_FORM_INPUT_CLASSES', 'input')\nDJANGO_LEDGER_CURRENCY_SYMBOL = getattr(settings, 'DJANGO_LEDGER_CURRENCY_SYMBOL', '$')\nDJANGO_LEDGER_SPACED_CURRENCY_SYMBOL = getattr(settings, 'DJANGO_LEDGER_SPACED_CURRENCY_SYMBOL', False)\nDJANGO_LEDGER_SHOW_FEEDBACK_BUTTON = getattr(settings, 'DJANGO_LEDGER_SHOW_FEEDBACK_BUTTON', False)\nDJANGO_LEDGER_FEEDBACK_EMAIL_LIST = getattr(settings, 'DJANGO_LEDGER_FEEDBACK_EMAIL_LIST', [])\nDJANGO_LEDGER_FEEDBACK_FROM_EMAIL = getattr(settings, 'DJANGO_LEDGER_FEEDBACK_FROM_EMAIL', None)\nDJANGO_LEDGER_VALIDATE_SCHEMAS_AT_RUNTIME = getattr(settings, 'DJANGO_LEDGER_VALIDATE_SCHEMAS_AT_RUNTIME', False)\nDJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE = getattr(settings, 'DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE', Decimal('0.02'))\nDJANGO_LEDGER_TRANSACTION_CORRECTION = getattr(settings, 'DJANGO_LEDGER_TRANSACTION_CORRECTION', Decimal('0.01'))\nDJANGO_LEDGER_ACCOUNT_CODE_GENERATE = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE', True)\nDJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH', 5)\nDJANGO_LEDGER_ACCOUNT_CODE_USE_PREFIX = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH', True)\nDJANGO_LEDGER_JE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_PREFIX', 'JE')\nDJANGO_LEDGER_PO_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_PO_NUMBER_PREFIX', 'PO')\nDJANGO_LEDGER_ESTIMATE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_ESTIMATE_NUMBER_PREFIX', 'E')\nDJANGO_LEDGER_INVOICE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_INVOICE_NUMBER_PREFIX', 'I')\nDJANGO_LEDGER_BILL_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_BILL_NUMBER_PREFIX', 'B')\nDJANGO_LEDGER_VENDOR_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_VENDOR_NUMBER_PREFIX', 'V')\nDJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX', 'C')\nDJANGO_LEDGER_EXPENSE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_EXPENSE_NUMBER_PREFIX', 'IEX')\nDJANGO_LEDGER_INVENTORY_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_INVENTORY_NUMBER_PREFIX', 'INV')\nDJANGO_LEDGER_PRODUCT_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_PRODUCT_NUMBER_PREFIX', 'IPR')\nDJANGO_LEDGER_DOCUMENT_NUMBER_PADDING = getattr(settings, 'DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING', 10)\nDJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX', '000')\nDJANGO_LEDGER_BILL_MODEL_ABSTRACT_CLASS = getattr(settings,\n 'DJANGO_LEDGER_BILL_MODEL_ABSTRACT_CLASS',\n 'django_ledger.models.bill.BillModelAbstract')\nDJANGO_LEDGER_INVOICE_MODEL_ABSTRACT_CLASS = getattr(settings,\n 'DJANGO_LEDGER_INVOICE_MODEL_ABSTRACT_CLASS',\n 
'django_ledger.models.invoice.InvoiceModelAbstract')\nDJANGO_LEDGER_DEFAULT_COA = getattr(settings, 'DJANGO_LEDGER_DEFAULT_COA', None)\nDJANGO_LEDGER_FINANCIAL_ANALYSIS = {\n 'ratios': {\n 'current_ratio': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': 2,\n 'watch': 1,\n 'warning': .5,\n 'critical': .25\n }\n },\n 'quick_ratio': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': 2,\n 'watch': 1,\n 'warning': .5,\n 'critical': .25\n }\n },\n 'debt_to_equity': {\n 'good_incremental': False,\n 'ranges': {\n 'healthy': 0,\n 'watch': .25,\n 'warning': .5,\n 'critical': 1\n }\n },\n 'return_on_equity': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .07,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'return_on_assets': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'net_profit_margin': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'gross_profit_margin': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n }\n}" }, { "identifier": "InvalidDateInputError", "path": "django_ledger/exceptions.py", "snippet": "class InvalidDateInputError(ValidationError):\n pass" }, { "identifier": "TransactionNotInBalanceError", "path": "django_ledger/exceptions.py", "snippet": "class TransactionNotInBalanceError(ValidationError):\n pass" }, { "identifier": "roles", "path": "django_ledger/io/roles.py", "snippet": "DEBIT = 'debit'\nCREDIT = 'credit'\nASSET_CA_CASH = 'asset_ca_cash'\nASSET_CA_MKT_SECURITIES = 'asset_ca_mkt_sec'\nASSET_CA_RECEIVABLES = 'asset_ca_recv'\nASSET_CA_INVENTORY = 'asset_ca_inv'\nASSET_CA_UNCOLLECTIBLES = 'asset_ca_uncoll'\nASSET_CA_PREPAID = 'asset_ca_prepaid'\nASSET_CA_OTHER = 'asset_ca_other'\nASSET_LTI_NOTES_RECEIVABLE = 'asset_lti_notes'\nASSET_LTI_LAND = 'asset_lti_land'\nASSET_LTI_SECURITIES = 'asset_lti_sec'\nASSET_PPE_BUILDINGS = 'asset_ppe_build'\nASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION = 'asset_ppe_build_accum_depr'\nASSET_PPE_EQUIPMENT = 'asset_ppe_equip'\nASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION = 'asset_ppe_equip_accum_depr'\nASSET_PPE_PLANT = 'asset_ppe_plant'\nASSET_PPE_PLANT_ACCUM_DEPRECIATION = 'asset_ppe_plant_depr'\nASSET_INTANGIBLE_ASSETS = 'asset_ia'\nASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION = 'asset_ia_accum_amort'\nASSET_ADJUSTMENTS = 'asset_adjustment'\nLIABILITY_CL_ACC_PAYABLE = 'lia_cl_acc_payable'\nLIABILITY_CL_WAGES_PAYABLE = 'lia_cl_wages_payable'\nLIABILITY_CL_TAXES_PAYABLE = 'lia_cl_taxes_payable'\nLIABILITY_CL_INTEREST_PAYABLE = 'lia_cl_int_payable'\nLIABILITY_CL_ST_NOTES_PAYABLE = 'lia_cl_st_notes_payable'\nLIABILITY_CL_LTD_MATURITIES = 'lia_cl_ltd_mat'\nLIABILITY_CL_DEFERRED_REVENUE = 'lia_cl_def_rev'\nLIABILITY_CL_OTHER = 'lia_cl_other'\nLIABILITY_LTL_NOTES_PAYABLE = 'lia_ltl_notes'\nLIABILITY_LTL_BONDS_PAYABLE = 'lia_ltl_bonds'\nLIABILITY_LTL_MORTGAGE_PAYABLE = 'lia_ltl_mortgage'\nEQUITY_CAPITAL = 'eq_capital'\nEQUITY_ADJUSTMENT = 'eq_adjustment'\nEQUITY_COMMON_STOCK = 'eq_stock_common'\nEQUITY_PREFERRED_STOCK = 'eq_stock_preferred'\nEQUITY_DIVIDENDS = 'eq_dividends'\nINCOME_OPERATIONAL = 'in_operational'\nINCOME_PASSIVE = 'in_passive'\nINCOME_CAPITAL_GAIN_LOSS = 'in_gain_loss'\nINCOME_INTEREST = 'in_interest'\nINCOME_OTHER = 'in_other'\nCOGS = 'cogs_regular'\nEXPENSE_OPERATIONAL = 'ex_regular'\nEXPENSE_CAPITAL = 'ex_capital'\nEXPENSE_DEPRECIATION = 'ex_depreciation'\nEXPENSE_AMORTIZATION = 
'ex_amortization'\nEXPENSE_TAXES = 'ex_taxes'\nEXPENSE_INTEREST_ST = 'ex_interest_st'\nEXPENSE_INTEREST_LT = 'ex_interest'\nEXPENSE_OTHER = 'ex_other'\nROOT_COA = 'root_coa'\nROOT_ASSETS = 'root_assets'\nROOT_LIABILITIES = 'root_liabilities'\nROOT_CAPITAL = 'root_capital'\nROOT_INCOME = 'root_income'\nROOT_COGS = 'root_cogs'\nROOT_EXPENSES = 'root_expenses'\nROOT_GROUP = [\n ROOT_COA,\n ROOT_ASSETS,\n ROOT_LIABILITIES,\n ROOT_CAPITAL,\n ROOT_INCOME,\n ROOT_COGS,\n ROOT_EXPENSES\n]\nROOT_GROUP_LEVEL_2 = [\n ROOT_ASSETS,\n ROOT_LIABILITIES,\n ROOT_CAPITAL,\n ROOT_INCOME,\n ROOT_COGS,\n ROOT_EXPENSES\n]\nROOT_GROUP_META = {\n ROOT_COA: {\n 'code': '00000',\n 'title': 'CoA Root Node',\n 'balance_type': DEBIT\n },\n ROOT_ASSETS: {\n 'code': '01000',\n 'title': 'Asset Accounts Root Node',\n 'balance_type': DEBIT\n },\n ROOT_LIABILITIES: {\n 'code': '02000',\n 'title': 'Liability Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_CAPITAL: {\n 'code': '03000',\n 'title': 'Capital Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_INCOME: {\n 'code': '04000',\n 'title': 'Income Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_COGS: {\n 'code': '05000',\n 'title': 'COGS Accounts Root Node',\n 'balance_type': DEBIT\n },\n ROOT_EXPENSES: {\n 'code': '06000',\n 'title': 'Expense Accounts Root Node',\n 'balance_type': DEBIT\n },\n}\nGROUP_QUICK_ASSETS = [\n ASSET_CA_CASH,\n ASSET_CA_MKT_SECURITIES\n]\nGROUP_CURRENT_ASSETS = [\n ASSET_CA_CASH,\n ASSET_CA_MKT_SECURITIES,\n ASSET_CA_INVENTORY,\n ASSET_CA_RECEIVABLES,\n ASSET_CA_PREPAID,\n ASSET_CA_UNCOLLECTIBLES,\n ASSET_CA_OTHER\n]\nGROUP_NON_CURRENT_ASSETS = [\n ASSET_LTI_NOTES_RECEIVABLE,\n ASSET_LTI_LAND,\n ASSET_LTI_SECURITIES,\n ASSET_PPE_BUILDINGS,\n ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION,\n ASSET_PPE_EQUIPMENT,\n ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION,\n ASSET_PPE_PLANT,\n ASSET_PPE_PLANT_ACCUM_DEPRECIATION,\n ASSET_INTANGIBLE_ASSETS,\n ASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION,\n ASSET_ADJUSTMENTS\n]\nGROUP_ASSETS = GROUP_CURRENT_ASSETS + GROUP_NON_CURRENT_ASSETS\nGROUP_CURRENT_LIABILITIES = [\n LIABILITY_CL_ACC_PAYABLE,\n LIABILITY_CL_DEFERRED_REVENUE,\n LIABILITY_CL_INTEREST_PAYABLE,\n LIABILITY_CL_LTD_MATURITIES,\n LIABILITY_CL_OTHER,\n LIABILITY_CL_ST_NOTES_PAYABLE,\n LIABILITY_CL_WAGES_PAYABLE,\n LIABILITY_CL_TAXES_PAYABLE\n]\nGROUP_LT_LIABILITIES = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n]\nGROUP_LIABILITIES = GROUP_CURRENT_LIABILITIES + GROUP_LT_LIABILITIES\nGROUP_CAPITAL = [\n EQUITY_CAPITAL,\n EQUITY_COMMON_STOCK,\n EQUITY_PREFERRED_STOCK,\n EQUITY_DIVIDENDS,\n EQUITY_ADJUSTMENT\n]\nGROUP_INCOME = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER\n]\nGROUP_COGS = [\n COGS\n]\nGROUP_EXPENSES = [\n EXPENSE_OPERATIONAL,\n EXPENSE_INTEREST_ST,\n EXPENSE_INTEREST_LT,\n EXPENSE_TAXES,\n EXPENSE_CAPITAL,\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION,\n EXPENSE_OTHER\n]\nGROUP_NET_PROFIT = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER,\n COGS\n]\nGROUP_GROSS_PROFIT = [\n INCOME_OPERATIONAL,\n COGS\n]\nGROUP_NET_SALES = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE\n]\nGROUP_PPE_ACCUM_DEPRECIATION = [\n ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION,\n ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION,\n ASSET_PPE_PLANT_ACCUM_DEPRECIATION\n]\nGROUP_EXPENSE_DEP_AND_AMT = [\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION\n]\nGROUP_EARNINGS = GROUP_INCOME + GROUP_COGS + 
GROUP_EXPENSES\nGROUP_EQUITY = GROUP_CAPITAL + GROUP_EARNINGS\nGROUP_LIABILITIES_EQUITY = GROUP_LIABILITIES + GROUP_EQUITY\nGROUP_INVOICE = [ASSET_CA_CASH, ASSET_CA_RECEIVABLES, LIABILITY_CL_DEFERRED_REVENUE]\nGROUP_BILL = [ASSET_CA_CASH, ASSET_CA_PREPAID, LIABILITY_CL_ACC_PAYABLE]\nGROUP_IC_OPERATING_REVENUES = [INCOME_OPERATIONAL]\nGROUP_IC_OPERATING_COGS = [COGS]\nGROUP_IC_OPERATING_EXPENSES = [EXPENSE_OPERATIONAL]\nGROUP_IC_OTHER_REVENUES = [\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER\n]\nGROUP_IC_OTHER_EXPENSES = [\n EXPENSE_INTEREST_ST,\n EXPENSE_INTEREST_LT,\n EXPENSE_TAXES,\n EXPENSE_CAPITAL,\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION,\n EXPENSE_OTHER\n]\nGROUP_CFS_NET_INCOME = GROUP_EARNINGS\nGROUP_CFS_OP_DEPRECIATION_AMORTIZATION = [\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION\n]\nGROUP_CFS_OP_INVESTMENT_GAINS = [\n INCOME_CAPITAL_GAIN_LOSS\n]\nGROUP_CFS_OP_ACCOUNTS_RECEIVABLE = [\n ASSET_CA_RECEIVABLES\n]\nGROUP_CFS_OP_INVENTORY = [\n ASSET_CA_INVENTORY\n]\nGROUP_CFS_OP_ACCOUNTS_PAYABLE = [\n LIABILITY_CL_ACC_PAYABLE\n]\nGROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT = [\n ASSET_CA_PREPAID,\n ASSET_CA_UNCOLLECTIBLES,\n ASSET_CA_OTHER\n]\nGROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT = [\n LIABILITY_CL_WAGES_PAYABLE,\n LIABILITY_CL_INTEREST_PAYABLE,\n LIABILITY_CL_TAXES_PAYABLE,\n LIABILITY_CL_LTD_MATURITIES,\n LIABILITY_CL_DEFERRED_REVENUE,\n LIABILITY_CL_OTHER,\n]\nGROUP_CFS_OPERATING = list(chain.from_iterable([\n GROUP_CFS_NET_INCOME,\n GROUP_CFS_OP_DEPRECIATION_AMORTIZATION,\n GROUP_CFS_OP_INVESTMENT_GAINS,\n GROUP_CFS_OP_ACCOUNTS_RECEIVABLE,\n GROUP_CFS_OP_INVENTORY,\n GROUP_CFS_OP_ACCOUNTS_PAYABLE,\n GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT,\n GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT\n]))\nGROUP_CFS_FIN_ISSUING_EQUITY = [EQUITY_CAPITAL, EQUITY_COMMON_STOCK, EQUITY_PREFERRED_STOCK]\nGROUP_CFS_FIN_DIVIDENDS = [EQUITY_DIVIDENDS]\nGROUP_CFS_FIN_ST_DEBT_PAYMENTS = [\n LIABILITY_CL_ST_NOTES_PAYABLE,\n LIABILITY_CL_ACC_PAYABLE,\n EXPENSE_INTEREST_ST\n]\nGROUP_CFS_FIN_LT_DEBT_PAYMENTS = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n EXPENSE_INTEREST_LT\n]\nGROUP_CFS_FINANCING = GROUP_CFS_FIN_ISSUING_EQUITY + GROUP_CFS_FIN_DIVIDENDS\nGROUP_CFS_INV_PURCHASE_OR_SALE_OF_PPE = [\n ASSET_PPE_BUILDINGS,\n ASSET_PPE_PLANT,\n ASSET_PPE_EQUIPMENT,\n INCOME_CAPITAL_GAIN_LOSS\n]\nGROUP_CFS_INV_LTD_OF_PPE = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n]\nGROUP_CFS_INVESTING_PPE = GROUP_CFS_INV_PURCHASE_OR_SALE_OF_PPE + GROUP_CFS_INV_LTD_OF_PPE\nGROUP_CFS_INV_PURCHASE_OF_SECURITIES = [\n ASSET_CA_MKT_SECURITIES,\n ASSET_LTI_NOTES_RECEIVABLE,\n ASSET_LTI_SECURITIES,\n INCOME_INTEREST,\n INCOME_PASSIVE,\n]\nGROUP_CFS_INV_LTD_OF_SECURITIES = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE\n]\nGROUP_CFS_INVESTING_SECURITIES = GROUP_CFS_INV_PURCHASE_OF_SECURITIES + GROUP_CFS_INV_LTD_OF_SECURITIES\nGROUP_CFS_INVESTING = GROUP_CFS_INVESTING_PPE + GROUP_CFS_INVESTING_SECURITIES\nGROUP_CFS_INVESTING_AND_FINANCING = GROUP_CFS_INVESTING + GROUP_CFS_FINANCING\nBS_ASSET_ROLE = 'assets'\nBS_LIABILITIES_ROLE = 'liabilities'\nBS_EQUITY_ROLE = 'equity'\nACCOUNT_ROLE_CHOICES = [\n (BS_ASSET_ROLE.capitalize(), (\n # CURRENT ASSETS ----\n (ASSET_CA_CASH, _('Current Asset')),\n (ASSET_CA_MKT_SECURITIES, _('Marketable Securities')),\n (ASSET_CA_RECEIVABLES, _('Receivables')),\n (ASSET_CA_INVENTORY, _('Inventory')),\n 
(ASSET_CA_UNCOLLECTIBLES, _('Uncollectibles')),\n (ASSET_CA_PREPAID, _('Prepaid')),\n (ASSET_CA_OTHER, _('Other Liquid Assets')),\n\n # LONG TERM INVESTMENTS ---\n (ASSET_LTI_NOTES_RECEIVABLE, _('Notes Receivable')),\n (ASSET_LTI_LAND, _('Land')),\n (ASSET_LTI_SECURITIES, _('Securities')),\n\n # PPE ...\n (ASSET_PPE_BUILDINGS, _('Buildings')),\n (ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION, _('Buildings - Accum. Depreciation')),\n (ASSET_PPE_PLANT, _('Plant')),\n (ASSET_PPE_PLANT_ACCUM_DEPRECIATION, _('Plant - Accum. Depreciation')),\n (ASSET_PPE_EQUIPMENT, _('Equipment')),\n (ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION, _('Equipment - Accum. Depreciation')),\n\n # Other Assets ...\n (ASSET_INTANGIBLE_ASSETS, _('Intangible Assets')),\n (ASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION, _('Intangible Assets - Accum. Amortization')),\n (ASSET_ADJUSTMENTS, _('Other Assets')),\n )),\n (BS_LIABILITIES_ROLE.capitalize(), (\n\n # CURRENT LIABILITIES ---\n (LIABILITY_CL_ACC_PAYABLE, _('Accounts Payable')),\n (LIABILITY_CL_WAGES_PAYABLE, _('Wages Payable')),\n (LIABILITY_CL_INTEREST_PAYABLE, _('Interest Payable')),\n (LIABILITY_CL_TAXES_PAYABLE, _('Taxes Payable')),\n (LIABILITY_CL_ST_NOTES_PAYABLE, _('Short Term Notes Payable')),\n (LIABILITY_CL_LTD_MATURITIES, _('Current Maturities of Long Tern Debt')),\n (LIABILITY_CL_DEFERRED_REVENUE, _('Deferred Revenue')),\n (LIABILITY_CL_OTHER, _('Other Liabilities')),\n\n # LONG TERM LIABILITIES ----\n (LIABILITY_LTL_NOTES_PAYABLE, _('Long Term Notes Payable')),\n (LIABILITY_LTL_BONDS_PAYABLE, _('Bonds Payable')),\n (LIABILITY_LTL_MORTGAGE_PAYABLE, _('Mortgage Payable')),\n )),\n (BS_EQUITY_ROLE.capitalize(), (\n\n # EQUITY ---\n (EQUITY_CAPITAL, _('Capital')),\n (EQUITY_COMMON_STOCK, _('Common Stock')),\n (EQUITY_PREFERRED_STOCK, _('Preferred Stock')),\n (EQUITY_ADJUSTMENT, _('Other Equity Adjustments')),\n (EQUITY_DIVIDENDS, _('Dividends & Distributions to Shareholders')),\n\n # INCOME ---\n (INCOME_OPERATIONAL, _('Operational Income')),\n (INCOME_PASSIVE, _('Investing/Passive Income')),\n (INCOME_INTEREST, _('Interest Income')),\n (INCOME_CAPITAL_GAIN_LOSS, _('Capital Gain/Loss Income')),\n (INCOME_OTHER, _('Other Income')),\n\n # COGS ----\n (COGS, _('Cost of Goods Sold')),\n\n # EXPENSES ----\n (EXPENSE_OPERATIONAL, _('Regular Expense')),\n (EXPENSE_INTEREST_ST, _('Interest Expense - Short Term Debt')),\n (EXPENSE_INTEREST_LT, _('Interest Expense - Long Term Debt')),\n (EXPENSE_TAXES, _('Tax Expense')),\n (EXPENSE_CAPITAL, _('Capital Expense')),\n (EXPENSE_DEPRECIATION, _('Depreciation Expense')),\n (EXPENSE_AMORTIZATION, _('Amortization Expense')),\n (EXPENSE_OTHER, _('Other Expense')),\n )),\n ('Root', (\n (ROOT_COA, 'CoA Root Account'),\n (ROOT_ASSETS, 'Assets Root Account'),\n (ROOT_LIABILITIES, 'Liabilities Root Account'),\n (ROOT_CAPITAL, 'Capital Root Account'),\n (ROOT_INCOME, 'Income Root Account'),\n (ROOT_COGS, 'COGS Root Account'),\n (ROOT_EXPENSES, 'Expenses Root Account'),\n ))\n]\nACCOUNT_CHOICES_NO_ROOT = [c for c in ACCOUNT_ROLE_CHOICES if c[0] != 'Root']\nROLES_ORDER_ASSETS = [a[0] for a in ACCOUNT_ROLE_CHOICES[0][1]]\nROLES_ORDER_LIABILITIES = [a[0] for a in ACCOUNT_ROLE_CHOICES[1][1]]\nROLES_ORDER_CAPITAL = [a[0] for a in ACCOUNT_ROLE_CHOICES[2][1]]\nROLES_ORDER_ALL = list(chain.from_iterable([ROLES_ORDER_ASSETS, ROLES_ORDER_LIABILITIES, ROLES_ORDER_CAPITAL]))\nACCOUNT_LIST_ROLE_ORDER = list(r[0] for r in chain.from_iterable([i[1] for i in ACCOUNT_CHOICES_NO_ROOT]))\nACCOUNT_LIST_ROLE_VERBOSE = {r[0]: r[1] for r in chain.from_iterable([i[1] for i 
in ACCOUNT_CHOICES_NO_ROOT])}\nROLE_TUPLES = sum([[(r[0].lower(), s[0]) for s in r[1]] for r in ACCOUNT_ROLE_CHOICES], list())\nROLE_DICT = dict([(t[0].lower(), [r[0] for r in t[1]]) for t in ACCOUNT_ROLE_CHOICES])\nVALID_ROLES = [r[1] for r in ROLE_TUPLES]\nBS_ROLES = dict((r[1], r[0]) for r in ROLE_TUPLES)\nBS_BUCKETS = {\n '0': 'Root',\n '1': 'Asset',\n '2': 'Liability',\n '3': 'Capital',\n '4': 'Income',\n '5': 'COGS',\n '6': 'Expenses'\n}\nBS_BUCKETS_ORDER = [v for _, v in BS_BUCKETS.items() if v != 'Root']\nROLES_VARS = locals().keys()\nROLES_DIRECTORY = dict()\nROLES_CATEGORIES = ['ASSET', 'LIABILITY', 'EQUITY', 'INCOME', 'COGS', 'EXPENSE']\nROLES_GROUPS = [g for g in ROLES_VARS if g.split('_')[0] == 'GROUP']\nGROUPS_DIRECTORY = dict()\ndef validate_roles(roles: Union[str, List[str]], raise_exception: bool = True) -> Set[str]:" }, { "identifier": "RoleContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class RoleContextManager:\n\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.DIGEST = io_data\n self.DIGEST['role_account'] = None\n self.DIGEST['role_balance'] = None\n\n self.ACCOUNTS = io_data['accounts']\n\n self.ROLES_ACCOUNTS = dict()\n self.ROLES_BALANCES = dict()\n self.ROLES_BALANCE_SHEET = dict()\n\n if self.BY_PERIOD:\n self.ROLES_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.DIGEST['role_balance_by_period'] = None\n if self.BY_UNIT:\n self.ROLES_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.DIGEST['role_balance_by_unit'] = None\n\n if self.BY_PERIOD and self.BY_UNIT:\n self.ROLES_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n\n def digest(self):\n\n self.process_roles()\n self.DIGEST['role_account'] = self.ROLES_ACCOUNTS\n self.DIGEST['role_balance'] = self.ROLES_BALANCES\n\n if self.BY_PERIOD:\n self.DIGEST['role_balance_by_period'] = self.ROLES_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.DIGEST['role_balance_by_unit'] = self.ROLES_BALANCES_BY_UNIT\n\n return self.DIGEST\n\n def process_roles(self):\n\n for c, l in roles_module.ROLES_DIRECTORY.items():\n for r in l:\n acc_list = list(acc for acc in self.ACCOUNTS if acc['role'] == getattr(roles_module, r))\n\n self.ROLES_ACCOUNTS[r] = acc_list\n self.ROLES_BALANCES[r] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.ROLES_BALANCES_BY_PERIOD[key][r] = sum(acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.ROLES_BALANCES_BY_UNIT[key][r] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0])" }, { "identifier": "GroupContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class GroupContextManager:\n GROUP_ACCOUNTS_KEY = 'group_account'\n GROUP_BALANCE_KEY = 'group_balance'\n GROUP_BALANCE_BY_UNIT_KEY = 'group_balance_by_unit'\n GROUP_BALANCE_BY_PERIOD_KEY = 'group_balance_by_period'\n\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.IO_DIGEST = io_data\n\n self.IO_DIGEST[self.GROUP_ACCOUNTS_KEY] = None\n self.IO_DIGEST[self.GROUP_BALANCE_KEY] = None\n\n self.DIGEST_ACCOUNTS = io_data['accounts']\n\n self.GROUPS_ACCOUNTS = dict()\n self.GROUPS_BALANCES = dict()\n\n if 
self.BY_PERIOD:\n self.GROUPS_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = None\n\n if self.BY_UNIT:\n self.GROUPS_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_UNIT_KEY] = None\n\n if self.BY_PERIOD and self.BY_UNIT:\n self.GROUPS_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = None\n\n def digest(self):\n\n self.process_groups()\n self.IO_DIGEST[self.GROUP_ACCOUNTS_KEY] = self.GROUPS_ACCOUNTS\n self.IO_DIGEST[self.GROUP_BALANCE_KEY] = self.GROUPS_BALANCES\n\n if self.BY_PERIOD:\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = self.GROUPS_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.IO_DIGEST[self.GROUP_BALANCE_BY_UNIT_KEY] = self.GROUPS_BALANCES_BY_UNIT\n return self.IO_DIGEST\n\n def get_accounts_generator(self, mod, g):\n return (acc for acc in self.DIGEST_ACCOUNTS if acc['role'] in getattr(mod, g))\n\n def process_groups(self):\n for g in roles_module.ROLES_GROUPS:\n acc_list = list(self.get_accounts_generator(roles_module, g))\n self.GROUPS_ACCOUNTS[g] = acc_list\n self.GROUPS_BALANCES[g] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.GROUPS_BALANCES_BY_PERIOD[key][g] = sum(\n acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.GROUPS_BALANCES_BY_UNIT[key][g] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0]\n )" }, { "identifier": "ActivityContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class ActivityContextManager:\n\n def __init__(self,\n io_data: dict,\n by_unit: bool = False,\n by_period: bool = False):\n\n self.DIGEST = io_data\n self.DIGEST['activity_account'] = None\n self.DIGEST['activity_balance'] = None\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.ACCOUNTS = io_data['accounts']\n self.ACTIVITY_ACCOUNTS = dict()\n self.ACTIVITY_BALANCES = dict()\n\n if self.BY_PERIOD:\n self.ACTIVITY_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.DIGEST['activity_balance_by_period'] = None\n if self.BY_UNIT:\n self.ACTIVITY_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.DIGEST['activity_balance_by_unit'] = None\n if self.BY_PERIOD and self.BY_UNIT:\n self.ROLES_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n\n def digest(self):\n\n self.process_activity()\n self.DIGEST['activity_account'] = self.ACTIVITY_ACCOUNTS\n self.DIGEST['activity_balance'] = self.ACTIVITY_BALANCES\n\n if self.BY_PERIOD:\n self.DIGEST['activity_balance_by_period'] = self.ACTIVITY_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.DIGEST['activity_balance_by_unit'] = self.ACTIVITY_BALANCES_BY_PERIOD\n\n def get_accounts_generator(self, activity: str):\n return (acc for acc in self.ACCOUNTS if acc['activity'] == activity)\n\n def process_activity(self):\n JournalEntryModel = lazy_importer.get_journal_entry_model()\n for act in JournalEntryModel.VALID_ACTIVITIES:\n acc_list = list(self.get_accounts_generator(act))\n self.ACTIVITY_ACCOUNTS[act] = acc_list\n self.ACTIVITY_BALANCES[act] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.ACTIVITY_BALANCES_BY_PERIOD[key][act] = 
sum(acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.ACTIVITY_BALANCES_BY_UNIT[key][act] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0])" }, { "identifier": "BalanceSheetStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class BalanceSheetStatementContextManager:\n def __init__(self, io_data: dict):\n self.DIGEST = io_data\n\n def digest(self):\n if 'group_account' in self.DIGEST:\n gb_bs = {\n bsr: list(l) for bsr, l in groupby(\n chain.from_iterable(\n [\n self.DIGEST['group_account']['GROUP_ASSETS'],\n self.DIGEST['group_account']['GROUP_LIABILITIES'],\n self.DIGEST['group_account']['GROUP_CAPITAL'],\n ]\n ),\n key=lambda acc: acc['role_bs'])\n }\n\n bs_context = {\n bs_role: {\n 'total_balance': sum(a['balance'] for a in gb),\n 'is_block': True,\n 'roles': {\n r: {\n 'accounts': list(a)\n } for r, a in groupby(list(gb), key=lambda acc: acc['role'])\n }\n } for bs_role, gb in gb_bs.items()\n }\n\n for bs_role, bs_role_data in bs_context.items():\n for acc_role, role_data in bs_role_data['roles'].items():\n role_data['total_balance'] = sum(a['balance'] for a in role_data['accounts'])\n role_data['role_name'] = roles_module.ACCOUNT_LIST_ROLE_VERBOSE[acc_role]\n\n bs_context['equity_balance'] = self.DIGEST['group_balance']['GROUP_EQUITY']\n bs_context['retained_earnings_balance'] = self.DIGEST['group_balance']['GROUP_EARNINGS']\n bs_context['liabilities_equity_balance'] = self.DIGEST['group_balance']['GROUP_LIABILITIES_EQUITY']\n\n self.DIGEST['balance_sheet'] = bs_context\n\n return self.DIGEST" }, { "identifier": "IncomeStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class IncomeStatementContextManager:\n\n def __init__(self, io_data: dict):\n self.DIGEST = io_data\n\n def digest(self):\n if 'group_account' in self.DIGEST:\n self.DIGEST['income_statement'] = {\n 'operating': {\n 'revenues': [\n acc for acc in self.DIGEST['group_account']['GROUP_INCOME'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_REVENUES\n ],\n 'cogs': [\n acc for acc in self.DIGEST['group_account']['GROUP_COGS'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_COGS\n ],\n 'expenses': [\n acc for acc in self.DIGEST['group_account']['GROUP_EXPENSES'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_EXPENSES\n ]\n },\n 'other': {\n 'revenues': [acc for acc in self.DIGEST['group_account']['GROUP_INCOME'] if\n acc['role'] in roles_module.GROUP_IC_OTHER_REVENUES],\n 'expenses': [acc for acc in self.DIGEST['group_account']['GROUP_EXPENSES'] if\n acc['role'] in roles_module.GROUP_IC_OTHER_EXPENSES],\n }\n }\n\n for activity, ic_section in self.DIGEST['income_statement'].items():\n for section, acc_list in ic_section.items():\n for acc in acc_list:\n acc['role_name'] = roles_module.ACCOUNT_LIST_ROLE_VERBOSE[acc['role']]\n\n # OPERATING INCOME...\n self.DIGEST['income_statement']['operating']['gross_profit'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['operating']['revenues'],\n self.DIGEST['income_statement']['operating']['cogs']\n ]\n ))\n self.DIGEST['income_statement']['operating']['net_operating_income'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['operating']['revenues'],\n self.DIGEST['income_statement']['operating']['cogs'],\n self.DIGEST['income_statement']['operating']['expenses'],\n ]\n 
))\n self.DIGEST['income_statement']['operating']['net_operating_revenue'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['revenues']\n )\n self.DIGEST['income_statement']['operating']['net_cogs'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['cogs']\n )\n self.DIGEST['income_statement']['operating']['net_operating_expenses'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['expenses']\n )\n\n # OTHER INCOME....\n self.DIGEST['income_statement']['other']['net_other_revenues'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['other']['revenues']\n )\n self.DIGEST['income_statement']['other']['net_other_expenses'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['other']['expenses']\n )\n self.DIGEST['income_statement']['other']['net_other_income'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['other']['revenues'],\n self.DIGEST['income_statement']['other']['expenses']\n ]\n ))\n\n # NET INCOME...\n self.DIGEST['income_statement']['net_income'] = self.DIGEST['income_statement']['operating'][\n 'net_operating_income']\n self.DIGEST['income_statement']['net_income'] += self.DIGEST['income_statement']['other'][\n 'net_other_income']\n return self.DIGEST" }, { "identifier": "CashFlowStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class CashFlowStatementContextManager:\n CFS_DIGEST_KEY = 'cash_flow_statement'\n\n # todo: implement by period and by unit...\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n self.IO_DIGEST = io_data\n self.CASH_ACCOUNTS = [a for a in self.IO_DIGEST['accounts'] if a['role'] == roles_module.ASSET_CA_CASH]\n self.JE_MODEL = lazy_loader.get_journal_entry_model()\n\n def check_io_digest(self):\n if GroupContextManager.GROUP_BALANCE_KEY not in self.IO_DIGEST:\n raise ValidationError(\n 'IO Digest must have groups for Cash Flow Statement'\n )\n\n def operating(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n operating_activities = dict()\n operating_activities['GROUP_CFS_NET_INCOME'] = {\n 'description': 'Net Income',\n 'balance': group_balances['GROUP_CFS_NET_INCOME']\n }\n operating_activities['GROUP_CFS_OP_DEPRECIATION_AMORTIZATION'] = {\n 'description': 'Depreciation & Amortization of Assets',\n 'balance': -group_balances['GROUP_CFS_OP_DEPRECIATION_AMORTIZATION']\n }\n operating_activities['GROUP_CFS_OP_INVESTMENT_GAINS'] = {\n 'description': 'Gain/Loss Sale of Assets',\n 'balance': group_balances['GROUP_CFS_OP_INVESTMENT_GAINS']\n }\n operating_activities['GROUP_CFS_OP_ACCOUNTS_RECEIVABLE'] = {\n 'description': 'Accounts Receivable',\n 'balance': -group_balances['GROUP_CFS_OP_ACCOUNTS_RECEIVABLE']\n }\n operating_activities['GROUP_CFS_OP_INVENTORY'] = {\n 'description': 'Inventories',\n 'balance': -group_balances['GROUP_CFS_OP_INVENTORY']\n }\n\n operating_activities['GROUP_CFS_OP_ACCOUNTS_PAYABLE'] = {\n 'description': 'Accounts Payable',\n 'balance': group_balances['GROUP_CFS_OP_ACCOUNTS_PAYABLE']\n }\n operating_activities['GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT'] = {\n 'description': 'Other Current Assets',\n 'balance': -group_balances['GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT']\n }\n operating_activities['GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT'] = {\n 'description': 'Other Current Liabilities',\n 'balance': 
group_balances['GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT']\n }\n\n net_cash_by_op_activities = sum(i['balance'] for g, i in operating_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['operating'] = operating_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity'] = dict(\n OPERATING=net_cash_by_op_activities\n )\n\n def financing(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n financing_activities = dict()\n financing_activities['GROUP_CFS_FIN_ISSUING_EQUITY'] = {\n 'description': 'Common Stock, Preferred Stock and Capital Raised',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_EQUITY)\n }\n financing_activities['GROUP_CFS_FIN_DIVIDENDS'] = {\n 'description': 'Dividends Payed Out to Shareholders',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_DIVIDENDS)\n }\n financing_activities['GROUP_CFS_FIN_ST_DEBT_PAYMENTS'] = {\n 'description': 'Increase/Reduction of Short-Term Debt Principal',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_STD)\n }\n financing_activities['GROUP_CFS_FIN_LT_DEBT_PAYMENTS'] = {\n 'description': 'Increase/Reduction of Long-Term Debt Principal',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_LTD)\n }\n\n net_cash = sum(i['balance'] for g, i in financing_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['financing'] = financing_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity']['FINANCING'] = net_cash\n\n def investing(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n investing_activities = dict()\n investing_activities['GROUP_CFS_INVESTING_SECURITIES'] = {\n 'description': 'Purchase, Maturity and Sales of Investments & Securities',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.INVESTING_SECURITIES)\n }\n investing_activities['GROUP_CFS_INVESTING_PPE'] = {\n 'description': 'Addition and Disposition of Property, Plant & Equipment',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.INVESTING_PPE)\n }\n\n net_cash = sum(i['balance'] for g, i in investing_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['investing'] = investing_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity']['INVESTING'] = net_cash\n\n def net_cash(self):\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash'] = sum([\n bal for act, bal in self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity'].items()\n ])\n\n def digest(self):\n self.check_io_digest()\n self.operating()\n self.financing()\n self.investing()\n self.net_cash()\n return self.IO_DIGEST" }, { "identifier": "IODigestContextManager", "path": "django_ledger/io/io_digest.py", "snippet": "class IODigestContextManager:\n\n def __init__(self, io_data: defaultdict):\n self.IO_DATA: defaultdict = io_data\n self.IO_MODEL = self.IO_DATA['io_model']\n self.TXS_QS = self.IO_DATA['txs_qs']\n self.STRFTIME_FORMAT = '%B %d, %Y'\n\n def get_io_data(self) -> defaultdict:\n return self.IO_DATA\n\n def get_strftime_format(self):\n return self.STRFTIME_FORMAT\n\n def get_from_date(self, as_str: bool = False, fmt=None) -> Optional[date]:\n from_date = self.IO_DATA['from_date']\n if from_date:\n if as_str:\n if not fmt:\n fmt = self.get_strftime_format()\n return from_date.strftime(fmt)\n return 
from_date\n\n def get_to_date(self, as_str: bool = False, fmt=None) -> date:\n if as_str:\n if not fmt:\n fmt = self.get_strftime_format()\n return self.IO_DATA['to_date'].strftime(fmt)\n return self.IO_DATA['to_date']\n\n def is_entity_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_entity_model()\n )\n\n def is_ledger_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_ledger_model()\n )\n\n def is_unit_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_unit_model()\n )\n\n def is_by_unit(self) -> bool:\n return self.IO_DATA['by_unit']\n\n def is_by_period(self) -> bool:\n return self.IO_DATA['by_period']\n\n def is_by_activity(self) -> bool:\n return self.IO_DATA['by_activity']\n\n # Balance Sheet Data...\n def has_balance_sheet(self) -> bool:\n return 'balance_sheet' in self.IO_DATA\n\n def get_balance_sheet_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['balance_sheet']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have balance sheet information available.'\n )\n\n # Income Statement Data...\n def has_income_statement(self) -> bool:\n return 'income_statement' in self.IO_DATA\n\n def get_income_statement_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['income_statement']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have income statement information available.'\n )\n\n # Cash Flow Statement Data...\n def has_cash_flow_statement(self):\n return 'cash_flow_statement' in self.IO_DATA\n\n def get_cash_flow_statement_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['cash_flow_statement']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have cash flow statement information available.'\n )\n\n # CLOSING ENTRIES...\n\n def get_closing_entry_data(self):\n io_data = self.get_io_data()\n return io_data['accounts']" }, { "identifier": "FinancialRatioManager", "path": "django_ledger/io/ratios.py", "snippet": "class FinancialRatioManager:\n\n def __init__(self, io_data):\n self.DIGEST = io_data\n self.ACCOUNTS = io_data['accounts']\n self.RATIO_NA = RATIO_NA\n\n self.quick_assets = io_data['group_balance']['GROUP_QUICK_ASSETS']\n self.assets = io_data['group_balance']['GROUP_ASSETS']\n self.current_liabilities = io_data['group_balance']['GROUP_CURRENT_LIABILITIES']\n self.current_assets = io_data['group_balance']['GROUP_CURRENT_ASSETS']\n self.equity = io_data['group_balance']['GROUP_CAPITAL']\n self.liabilities = io_data['group_balance']['GROUP_LIABILITIES']\n self.net_income = io_data['group_balance']['GROUP_EARNINGS']\n self.net_sales = io_data['group_balance']['GROUP_NET_SALES']\n self.net_profit = io_data['group_balance']['GROUP_NET_PROFIT']\n self.gross_profit = io_data['group_balance']['GROUP_GROSS_PROFIT']\n self.RATIOS = dict()\n\n def digest(self):\n self.quick_ratio()\n self.current_ratio()\n self.debt_to_equity()\n self.return_on_equity()\n self.return_on_assets()\n self.net_profit_margin()\n self.gross_profit_margin()\n self.DIGEST['ratios'] = self.RATIOS\n return self.DIGEST\n\n # ------> SOLVENCY RATIOS <------\n def quick_ratio(self, as_percent=False):\n if self.current_liabilities == 0:\n cr = self.RATIO_NA\n else:\n cr = self.quick_assets / self.current_liabilities\n if as_percent:\n cr = cr * 100\n self.RATIOS['quick_ratio'] = cr\n\n def current_ratio(self, 
as_percent=False):\n if self.current_liabilities == 0:\n cr = RATIO_NA\n else:\n cr = self.current_assets / self.current_liabilities\n if as_percent:\n cr = cr * 100\n self.RATIOS['current_ratio'] = cr\n\n # ------> LEVERAGE RATIOS <------\n def debt_to_equity(self, as_percent=False):\n if self.equity == 0:\n cr = RATIO_NA\n else:\n cr = self.liabilities / self.equity\n if as_percent:\n cr = cr * 100\n self.RATIOS['debt_to_equity'] = cr\n\n # ------> PROFITABILITY RATIOS <------\n def return_on_equity(self, as_percent=False):\n if self.equity == 0:\n cr = RATIO_NA\n else:\n cr = self.net_income / self.equity\n if as_percent:\n cr = cr * 100\n self.RATIOS['return_on_equity'] = cr\n\n def return_on_assets(self, as_percent=False):\n if self.assets == 0:\n cr = RATIO_NA\n else:\n cr = self.net_income / self.assets\n if as_percent:\n cr = cr * 100\n self.RATIOS['return_on_assets'] = cr\n\n def net_profit_margin(self, as_percent=False):\n if self.net_sales == 0:\n npm = RATIO_NA\n else:\n npm = self.net_profit / self.net_sales\n if as_percent:\n npm = npm * 100\n self.RATIOS['net_profit_margin'] = npm\n\n def gross_profit_margin(self, as_percent=False):\n if self.gross_profit == 0:\n gpm = RATIO_NA\n else:\n gpm = self.gross_profit / self.net_sales\n if as_percent:\n gpm = gpm * 100\n self.RATIOS['gross_profit_margin'] = gpm" }, { "identifier": "lazy_loader", "path": "django_ledger/models/utils.py", "snippet": "class LazyLoader:\n ENTITY_MODEL = None\n ENTITY_STATE_MODEL = None\n UNIT_MODEL = None\n ACCOUNT_MODEL = None\n BANK_ACCOUNT_MODEL = None\n LEDGER_MODEL = None\n TXS_MODEL = None\n JE_MODEL = None\n ITEM_MODEL = None\n ITEM_TRANSACTION_MODEL = None\n CUSTOMER_MODEL = None\n INVOICE_MODEL = None\n BILL_MODEL = None\n UOM_MODEL = None\n VENDOR_MODEL = None\n TRANSACTION_MODEL = None\n ENTITY_UNIT_MODEL = None\n PURCHASE_ORDER_MODEL = None\n ESTIMATE_MODEL = None\n CLOSING_ENTRY_MODEL = None\n CLOSING_ENTRY_TRANSACTION_MODEL = None\n ENTITY_DATA_GENERATOR = None\n BALANCE_SHEET_REPORT_CLASS = None\n INCOME_STATEMENT_REPORT_CLASS = None\n CASH_FLOW_STATEMENT_REPORT_CLASS = None\n def get_entity_model(self):\n def get_entity_state_model(self):\n def get_bank_account_model(self):\n def get_account_model(self):\n def get_txs_model(self):\n def get_purchase_order_model(self):\n def get_ledger_model(self):\n def get_unit_model(self):\n def get_journal_entry_model(self):\n def get_item_model(self):\n def get_item_transaction_model(self):\n def get_customer_model(self):\n def get_bill_model(self):\n def get_invoice_model(self):\n def get_uom_model(self):\n def get_vendor_model(self):\n def get_transaction_model(self):\n def get_entity_unit_model(self):\n def get_estimate_model(self):\n def get_entity_data_generator(self):\n def get_closing_entry_model(self):\n def get_closing_entry_transaction_model(self):\n def get_balance_sheet_report_class(self):\n def get_income_statement_report_class(self):\n def get_cash_flow_statement_report_class(self):" } ]
from collections import defaultdict, namedtuple from datetime import datetime, date from itertools import groupby from pathlib import Path from random import choice from typing import List, Set, Union, Tuple, Optional, Dict from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Sum, QuerySet from django.db.models.functions import TruncMonth from django.http import Http404 from django.utils.dateparse import parse_date, parse_datetime from django.utils.timezone import make_aware, is_naive, localtime from django.utils.translation import gettext_lazy as _ from django_ledger import settings from django_ledger.exceptions import InvalidDateInputError, TransactionNotInBalanceError from django_ledger.io import roles as roles_module from django_ledger.io.io_context import (RoleContextManager, GroupContextManager, ActivityContextManager, BalanceSheetStatementContextManager, IncomeStatementContextManager, CashFlowStatementContextManager) from django_ledger.io.io_digest import IODigestContextManager from django_ledger.io.ratios import FinancialRatioManager from django_ledger.models.utils import lazy_loader
13,672
""" Django Ledger created by Miguel Sanda <[email protected]>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <[email protected]> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception:
""" Django Ledger created by Miguel Sanda <[email protected]>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <[email protected]> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception:
raise TransactionNotInBalanceError(
2
2023-10-20 01:07:20+00:00
16k
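The diff_tx_data helper in the record above sums credit and debit amounts and, per the gold next line, raises TransactionNotInBalanceError when the difference exceeds the configured tolerance. A minimal standalone sketch of that double-entry check follows; the names check_tx_balance and MAX_TOLERANCE are hypothetical stand-ins, not django-ledger's actual API.

from decimal import Decimal

MAX_TOLERANCE = Decimal("0.02")  # stand-in for DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE

class TransactionNotInBalanceError(Exception):
    pass

def check_tx_balance(tx_data: list) -> Decimal:
    # Sum credits and debits, as diff_tx_data does for dict inputs.
    credits = sum(tx["amount"] for tx in tx_data if tx["tx_type"] == "credit")
    debits = sum(tx["amount"] for tx in tx_data if tx["tx_type"] == "debit")
    diff = credits - debits
    if abs(diff) > MAX_TOLERANCE:
        raise TransactionNotInBalanceError(
            f"Credits ({credits}) and debits ({debits}) differ by {diff}."
        )
    return diff

check_tx_balance([
    {"tx_type": "debit", "amount": Decimal("100.00")},
    {"tx_type": "credit", "amount": Decimal("100.00")},
])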
hitz-zentroa/This-is-not-a-Dataset
run.py
[ { "identifier": "load_model", "path": "load_model.py", "snippet": "def load_model(\n inference: bool,\n model_weights_name_or_path: str,\n quantization: Optional[int] = None,\n use_lora: bool = False,\n lora_weights_name_or_path: Optional[str] = None,\n lora_target_modules: Optional[List[str]] = [\"all\"],\n lora_r: Optional[int] = 8,\n lora_alpha: Optional[int] = 16,\n lora_dropout: Optional[float] = 0.05,\n torch_dtype: Optional[str] = None,\n force_auto_device_map: bool = False,\n use_gradient_checkpointing: bool = False,\n trust_remote_code: bool = False,\n use_flash_attention: bool = False,\n use_better_transformer: bool = False,\n fsdp_training: bool = False,\n max_memory_MB: Optional[int] = None,\n) -> Tuple[PreTrainedModel, PreTrainedTokenizerBase]:\n \"\"\"\n Load any Decoder model for training.\n\n Args:\n inference (`bool`):\n Whether to load the model for inference or training. If set to `True`, the model will be loaded\n in evaluation mode. In this case, if use_lora is set to `True`, you must provide the path to the\n LoRA weights. Defaults to `False`.\n model_weights_name_or_path (`str`):\n The path to your local model weights and tokenizer or huggingface model name.\n The list of labels to add to the tokenizer. Defaults to `None`.\n quantization (`int`, optional):\n '4' or '8' for 4 bits or 8 bits quantization or None for 16/32bits training. Defaults to `None`.\n\n Requires bitsandbytes library: https://github.com/TimDettmers/bitsandbytes\n use_lora (`bool`, optional):\n Whether to use LORA. Defaults to False.\n\n See https://arxiv.org/pdf/2106.09685.pdf for more details.\n\n Requires huggingface PEFT library: https://github.com/huggingface/peft\n lora_weights_name_or_path (`Optional[str]`, optional):\n The name or path to the pre-trained LORA model weights. You can also provide\n a huggingface hub model name to load the weights from there. If not provided,\n the weights will be initialized randomly, this requires training the model.\n Defaults to `None`.\n lora_target_modules (`Optional[List[str]]`, optional):\n The list of modules to apply LORA to. If not provided, we will use PEFT\n default modules. Defaults to `None`.\n lora_r (`Optional[int]`, optional):\n Lora attention dimension. Defaults to `8`.\n lora_alpha (`Optional[int]`, optional):\n The alpha parameter for Lora scaling. Defaults to `16`.\n lora_dropout (`Optional[float]`, optional):\n The dropout probability for Lora layers. Defaults to 0.05.\n torch_dtype (`Optional[str]`, optional):\n Override the default `torch.dtype` and load the model under this dtype. If\n `auto` is passed, the dtype will be automatically derived from the model's\n weights. Defaults to `None`.\n force_auto_device_map (`bool`, optional):\n Whether to force the use of the auto device map. If set to True, the model will be split across\n GPUs and CPU to fit the model in memory. If set to False, a full copy of the model will be loaded\n into each GPU. Defaults to False.\n use_gradient_checkpointing (`bool`, optiona):\n Whether to use gradient checkpointing for training\n trust_remote_code (`bool`, optional):\n Trust the remote code from HuggingFace model hub. Defaults to False.\n use_flash_attention (`bool`, optional):\n Whether to use Flash Attention. Defaults to True. 
Flash attention must be installed, see:\n 'https://github.com/Dao-AILab/flash-attention' for more details.\n use_better_transformer (`bool`, optional):\n Whether to transform the model using Better Transformer library:\n https://huggingface.co/docs/optimum/bettertransformer/overview. Requires optimum\n 'https://huggingface.co/docs/optimum/installation'. Only supported for inference!\n Defaults to False.\n fsdp_training: (`bool`, optional):\n Whether Fully Sharded Data Parallelism is enabled for training. Defaults to False.\n Used to prevent casting layers to fp32 if the model is already in fp16, which causes\n an error: ValueError: Must flatten tensors with uniform dtype but got torch.float16 and torch.float32\n max_memory_MB (`int`):\n Free memory per gpu in MB. Used to compute the device map when force_auto_device_map is set to True.\n Raises:\n `ValueError`:\n is raised when `int8_quantization=True` but `use_lora=False`.\n\n Returns:\n `Tuple[PreTrainedModel, PreTrainedTokenizerBase]`:\n The loaded model and tokenizer.\n \"\"\"\n\n # Sanity checks\n\n if isinstance(quantization, str):\n quantization = int(quantization)\n assert (quantization is None) or (\n quantization in [4, 8]\n ), f\"Quantization must be 4 or 8, or None for FP32/FP16 training. You passed: {quantization}\"\n\n if not inference and quantization is not None and not use_lora:\n raise ValueError(\n \"'Quantization' == 4/8 is only supported with LoRA. If you want \"\n \"to train a 4/8bits quantified model, you must set `use_lora=True`. If you want to \"\n \"use a 4/8 bits optimizer, set `quantization=None` and choose a 4/8 bit optimizer using 'optim' \"\n \"argument (e.g 'adamw_bnb_8bit', 'lion_8bit', 'paged_adamw_8bit', ...).\"\n )\n\n if inference and use_lora and lora_weights_name_or_path is None:\n raise ValueError(\n \"You must provide the path to the LoRA weights when loading the model for inference.\"\n )\n\n if use_better_transformer and not inference:\n logging.warning(\n \"Better Transformer is only supported for inference. Better Transformers does not support \"\n \"attention mask for training, therefore it is not compatible with CoLLIE training. See \"\n \"https://huggingface.co/docs/optimum/bettertransformer/overview for more details. We will \"\n \"set use_better_transformer=False.\"\n )\n use_better_transformer = False\n\n if use_better_transformer and use_flash_attention:\n raise ValueError(\n \"You cannot use both Flash Attention and Better Transformer flags. Flash Attention is already part of\"\n \" Better Transformers, so you can just set use_better_transformer=True to use Flash Attention. The Flash\"\n \" Attention flag is intended for patching HuggingFace models.\"\n )\n\n if lora_weights_name_or_path is not None and not use_lora:\n logging.warning(\n \"You provided a path to LoRA weights but use_lora is set to False. 
We will set use_lora=True.\"\n )\n use_lora = True\n\n logging.info(f\"Loading model model from {model_weights_name_or_path}\")\n\n MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.update(\n {\n \"stablelm_epoch\": \"LlamaForCausalLM\",\n }\n )\n\n # Get the device map config\n\n device_map, max_memory = get_device_map(\n force_auto_device_map=force_auto_device_map,\n max_memory_MB=max_memory_MB,\n use_better_transformer=use_better_transformer,\n )\n\n # Load the model config\n\n if use_lora:\n config = AutoConfig.from_pretrained(\n model_weights_name_or_path,\n trust_remote_code=trust_remote_code,\n pretraining_tp=1, # Fix mat1 and mat2 shapes cannot be multiplied error with LLaMA-2\n # See https://github.com/huggingface/transformers/pull/24906\n )\n else:\n config = AutoConfig.from_pretrained(\n model_weights_name_or_path,\n trust_remote_code=trust_remote_code,\n )\n\n # Load the model tokenizer\n\n tokenizer: PreTrainedTokenizerBase = AutoTokenizer.from_pretrained(\n model_weights_name_or_path,\n add_eos_token=True,\n trust_remote_code=trust_remote_code,\n legacy=True, # This library was developed with the legacy tokenizer.\n # It might or might not work with the latest updates to the T5 tokenizers. So we set legacy=True to be safe.\n )\n\n if tokenizer.pad_token_id is None:\n if \"<|padding|>\" in tokenizer.get_vocab():\n # StabilityLM specific fix\n tokenizer.add_special_tokens({\"pad_token\": \"<|padding|>\"})\n elif tokenizer.unk_token is not None:\n logging.warning(\n \"Tokenizer does not have a pad token, we will use the unk token as pad token.\"\n )\n tokenizer.pad_token_id = tokenizer.unk_token_id\n else:\n logging.warning(\n \"Tokenizer does not have a pad token. We will use the eos token as pad token.\"\n )\n tokenizer.pad_token_id = tokenizer.eos_token_id\n\n # Load the model weights\n\n # Get the quantization config\n quant_args = {}\n torch_dtype = (\n torch_dtype if torch_dtype in [\"auto\", None] else getattr(torch, torch_dtype)\n )\n\n if quantization is not None:\n quant_args = (\n {\"load_in_4bit\": True} if quantization == 4 else {\"load_in_8bit\": True}\n )\n if quantization == 4:\n bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n if torch_dtype in [\"auto\", None]\n else torch_dtype,\n )\n\n else:\n bnb_config = BitsAndBytesConfig(\n load_in_8bit=True,\n )\n logging.info(\n f\"Bits and Bytes config: {json.dumps(bnb_config.to_dict(),indent=4,ensure_ascii=False)}\"\n )\n else:\n logging.info(f\"Loading model with dtype: {torch_dtype}\")\n bnb_config = None\n\n # Get the correct load function for each model_type\n if config.model_type in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES:\n logging.warning(\n f\"Model {model_weights_name_or_path} is a encoder-decoder model. We will load it as a Seq2SeqLM model.\"\n )\n\n load_fn = AutoModelForSeq2SeqLM\n model_type = \"seq2seq\"\n\n elif config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:\n logging.warning(\n f\"Model {model_weights_name_or_path} is an decoder-only model. 
We will load it as a CausalLM model.\"\n )\n\n load_fn = AutoModelForCausalLM\n tokenizer.padding_side = \"left\"\n model_type = \"causal\"\n\n else:\n raise ValueError(\n f\"Model {model_weights_name_or_path} of type {config.model_type} is not supported by CoLLIE.\"\n \"Supported models are:\\n\"\n f\"Seq2SeqLM: {MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES}\\n\"\n f\"CausalLM: {MODEL_FOR_CAUSAL_LM_MAPPING_NAMES}\\n\"\n )\n\n # Load the model weights\n # Flash attention 2 was added to HuggingFace transformers very recently. Let's add it as kwargs to the load function\n # so if it is set to False, we can load the model in older versions of transformers.\n if use_flash_attention:\n kwargs = {\"use_flash_attention_2\": True}\n else:\n kwargs = {}\n\n model: PreTrainedModel = load_fn.from_pretrained(\n pretrained_model_name_or_path=model_weights_name_or_path,\n device_map=device_map,\n max_memory=max_memory,\n quantization_config=bnb_config,\n torch_dtype=torch_dtype,\n config=config,\n trust_remote_code=trust_remote_code,\n **quant_args,\n **kwargs,\n )\n\n logging.info(f\"Model dtype: {model.dtype}\")\n logging.info(\n \"Total model memory footprint: \"\n + str(model.get_memory_footprint() / 1e6)\n + \" MB\"\n )\n\n # Prepare the model for k-bit training and enable gradient checkpointing\n if quantization is not None and not inference:\n from peft import prepare_model_for_kbit_training\n\n model = prepare_model_for_kbit_training(\n model, use_gradient_checkpointing=use_gradient_checkpointing\n )\n else:\n if use_gradient_checkpointing and not inference:\n model.gradient_checkpointing_enable()\n\n # Load LoRA weights\n if use_lora:\n from peft import LoraConfig, PeftModel, TaskType, get_peft_model\n\n if not inference:\n model.enable_input_require_grads() # Enables the gradients for the input embeddings\n\n if lora_weights_name_or_path is None:\n logging.info(\n \"No pretrained LORA weights provided, we will initialize the weights randomly.\"\n )\n\n if lora_target_modules is None or (\n lora_target_modules is not None and len(lora_target_modules) == 0\n ):\n logging.warning(\n \"No target modules provided, will use the default modules for the\"\n \" model in huggingface PEFT library. \"\n )\n lora_target_modules = None\n\n if lora_target_modules == [\"all\"]:\n logging.warning(\n \"You provided 'all' as target modules, we will use all the model to which LoRA can be applied.\"\n )\n lora_target_modules = find_all_linear_names(\n model, quantization=quantization\n )\n\n lora_config = LoraConfig(\n r=lora_r,\n lora_alpha=lora_alpha,\n lora_dropout=lora_dropout,\n bias=\"none\",\n task_type=TaskType.CAUSAL_LM\n if model_type == \"causal\"\n else TaskType.SEQ_2_SEQ_LM,\n target_modules=lora_target_modules,\n )\n\n model = get_peft_model(model, lora_config)\n\n else:\n logging.info(\n f\"Loading pretrained LORA weights from {lora_weights_name_or_path}\"\n )\n\n model = PeftModel.from_pretrained(model, lora_weights_name_or_path)\n\n logging.info(f\"\\nLoRA config:\\n{model.peft_config}\\n\")\n\n if inference:\n if use_lora:\n if quantization is None:\n # If we are not using quantization, we merge the LoRA layers into the model for faster inference.\n # This is not possible if we are using 4/8 bit quantization.\n logging.info(\"Merging LoRA layers into the model for faster inference.\")\n model = model.merge_and_unload()\n else:\n logging.info(\n \"Quantization is enabled, we will not merge LoRA layers into the model. 
Inference will be slower.\"\n )\n else:\n trainable_params, total_params, trainable_percentage = get_trainable_parameters(\n model\n )\n logging.info(\n f\"---> Trainable params: {trainable_params} || all params: {total_params} ||\"\n f\" trainable%: {round(trainable_percentage,6)}\\n\"\n )\n\n return model, tokenizer" }, { "identifier": "get_dataloader", "path": "dataset.py", "snippet": "def get_dataloader(\n tokenizer: PreTrainedTokenizerBase,\n split: str,\n is_encoder_decoder: bool = False,\n max_length: int = 512,\n conv_template: str = None,\n batch_size: int = 1,\n prompt_loss_weight: float = 0.05,\n add_bos_token: bool = False,\n num_workers: int = min(8, os.cpu_count()),\n pattern: str = None,\n only_affirmative: bool = False,\n only_negative: bool = False,\n only_non_distractor: bool = False,\n only_distractor: bool = False,\n) -> DataLoader:\n \"\"\"\n Get a dataloader for a dataset.\n\n Args:\n tokenizer (`PreTrainedTokenizerBase`):\n The tokenizer to use.\n split ('list'):\n The split to load (train, dev, test, all).\n is_encoder_decoder (`bool`, optional):\n Whether the model is an encoder-decoder model. Defaults to `False`.\n max_length (`int`, optional):\n The maximum length of the input. Defaults to `2048`.\n conv_template (`str`, optional):\n The conversation template to use. Defaults to `None`. If `None` we will return the prompt.\n batch_size (`int`, optional):\n The batch size. Defaults to `1`.\n prompt_loss_weight (`float`, optional):\n The weight of the prompt tokens in the loss. If set to '0.05' the prompt tokens will have a total weight\n of 5% in the loss while the result tokens will have a total weight of 95%. Defaults to `0.05`.\n add_bos_token (`bool`, optional):\n Whether to add the beginning of sentence token to the input. Defaults to `False`.\n num_workers (`int`, optional):\n The number of workers to use for the dataloader. Defaults to `0`.\n pattern (`str`, optional):\n The pattern to use for training. Defaults to `None`.\n only_affirmative (`bool`, optional):\n Whether to only load affirmative examples for training. Defaults to `False`.\n only_negative (`bool`, optional):\n Whether to only load negative examples for training. Defaults to `False`.\n only_non_distractor (`bool`, optional):\n Whether to only load non-distractor examples for training. Defaults to `False`.\n only_distractor (`bool`, optional):\n Whether to only load distractor examples for training. Defaults to `False`.\n\n\n Returns:\n `DataLoader`: The dataloader.\n \"\"\"\n\n data_collator = DataCollatorForSeq2Seq(\n tokenizer,\n padding=True,\n label_pad_token_id=-100, # tokenizer.pad_token_id,\n # pad_to_multiple_of=8, # May be faster on some hardware\n )\n\n dataset = ThisIsNotADataset(\n tokenizer=tokenizer,\n split=split,\n is_encoder_decoder=is_encoder_decoder,\n max_length=max_length,\n conv_template=conv_template,\n prompt_loss_weight=prompt_loss_weight,\n add_bos_token=add_bos_token,\n pattern=pattern,\n only_affirmative=only_affirmative,\n only_negative=only_negative,\n only_non_distractor=only_non_distractor,\n only_distractor=only_distractor,\n )\n\n return DataLoader(\n dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=split == \"train\",\n collate_fn=data_collator,\n pin_memory=True,\n )" }, { "identifier": "evaluate", "path": "evaluate.py", "snippet": "def evaluate(predictions_path: str, output_path: Optional[str] = None) -> dict:\n \"\"\"\n Evaluate the predictions of a model\n Args:\n predictions_path: Path to the predictions file. 
It should be a jsonl with the fields: 'pattern_id',\n 'pattern', 'test_id', 'negation_type', 'semantic_type', 'syntactic_scope', 'isDistractor',\n 'label', 'sentence', 'prediction'\n output_path: Path to the output file. If None, the output will be printed to stdout\n Returns:\n A dictionary with the scores\n The scorer will output the following metrics:\n - **all_affirmations**: Accuracy of the model in affirmative sentences\n - **all_negations**: Accuracy of the model in negative sentences\n - **all**: (Overall) Accuracy of the model in all sentences\n - **input_affirmation**: Accuracy of the model in affirmative sentences without distractors\n - **input_negation**: Accuracy of the model in negative sentences without distractors\n - **distractor_affirmation**: Accuracy of the model in affirmative sentences with distractors\n - **distractor_negation**: Accuracy of the model in negative sentences with distractors\n - **Negation_analysis**: Fine-grained analysis of the model in negative sentences (verbal, analytic,\n clausal, non_verbal, synthetic, subclausal negation types)\n - **Synonymy1, Hypernymy, Part...**: Fine-grained analysis of the model in each pattern\n \"\"\"\n\n print(\n \"\"\"\n*************************************** Running evaluation ***************************************\nThe scorer will output the following metrics:\n - **all_affirmations**: Accuracy of the model in affirmative sentences\n - **all_negations**: Accuracy of the model in negative sentences\n - **all**: (Overall) Accuracy of the model in all sentences\n - **input_affirmation**: Accuracy of the model in affirmative sentences without distractors\n - **input_negation**: Accuracy of the model in negative sentences without distractors\n - **distractor_affirmation**: Accuracy of the model in affirmative sentences with distractors\n - **distractor_negation**: Accuracy of the model in negative sentences with distractors\n - **Negation_analysis**: Fine-grained analysis of the model in negative sentences (verbal, analytic,\n clausal, non_verbal, synthetic, subclausal negation types)\n - **Synonymy1, Hypernymy, Part...**: Fine-grained analysis of the model in each pattern\n**************************************************************************************************\n \"\"\"\n )\n dataset_pattern = {\n \"Synonymy1\": [],\n \"Antonymy1\": [],\n \"Synonymy2\": [],\n \"Antonymy2\": [],\n \"Hypernymy\": [],\n \"Part\": [],\n \"Substance\": [],\n \"Member\": [],\n \"Agent\": [],\n \"Instrument\": [],\n \"Result\": [],\n }\n\n scorer = Scorer()\n coherence_scorer = Coherence_Scorer()\n\n coherence_scorer.from_file(predictions_path)\n with open(predictions_path, \"r\", encoding=\"utf8\") as file:\n for line in file:\n example = json.loads(line.strip())\n pattern = example[\"pattern\"]\n dataset_pattern[pattern].append(example)\n scorer.add_example(\n negation_type=example[\"negation_type\"],\n semantic_type=example[\"semantic_type\"],\n syntactic_scope=example[\"syntactic_scope\"],\n isDistractor=example[\"isDistractor\"],\n gold_label=example[\"label\"],\n predicted_label=example[\"prediction\"],\n )\n\n scores = scorer.compute_scores()\n coherence_scorer = Coherence_Scorer.from_file(predictions_path)\n scores[\"coherence_scores\"] = coherence_scorer.compute_scores()\n\n for pattern in dataset_pattern:\n scorer = Scorer()\n coherence_scorer = Coherence_Scorer()\n coherence_scorer.add_pattern(dataset_pattern[pattern])\n for example in dataset_pattern[pattern]:\n scorer.add_example(\n 
negation_type=example[\"negation_type\"],\n semantic_type=example[\"semantic_type\"],\n syntactic_scope=example[\"syntactic_scope\"],\n isDistractor=example[\"isDistractor\"],\n gold_label=example[\"label\"],\n predicted_label=example[\"prediction\"],\n )\n scores[pattern] = scorer.compute_scores()\n scores[pattern][\"coherence_scores\"] = coherence_scorer.compute_scores()\n\n if output_path is not None:\n print(f\"Saving scores to {output_path}\")\n with open(output_path, \"w\", encoding=\"utf8\") as file:\n print(json.dumps(scores, ensure_ascii=False, indent=4), file=file)\n else:\n print(json.dumps(scores, ensure_ascii=False, indent=4))\n\n print(\"*** Evaluation finished ***\")\n return scores" }, { "identifier": "DataTrainingArguments", "path": "config.py", "snippet": "class DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n do_predict_full_dataset: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to run predictions on the full dataset. If True, the model will be evaluated on the \"\n \"full dataset. If False, the model will be evaluated on the test set. Defaults to False.\"\n },\n )\n max_seq_length: int = field(\n default=512,\n metadata={\n \"help\": (\n \"The maximum total input sequence length after tokenization. Sequences\"\n \" longer than this will be truncated, sequences shorter will be padded.\"\n )\n },\n )\n\n prompt_loss_weight: float = field(\n default=0.05,\n metadata={\n \"help\": (\n \"The weight of the prompt tokens in the loss. If set to '0.05' the prompt tokens will have a total\"\n \" weight of 5% in the loss while the result tokens will have a total weight of 95%. Only used for\"\n \" computing the loss in the training data. Defaults to `0.05`.\"\n )\n },\n )\n\n force_auto_device_map: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to force the use of the auto device map. If set to True, the model will be split across \"\n \"GPUs and CPU to fit the model in memory. If set to False, a full copy of the model will be loaded \"\n \"into each GPU. Defaults to False.\"\n )\n },\n )\n\n pattern: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"The pattern to use for training. If not specified, all patterns will be used.\"\n ),\n \"choices\": [\n \"Synonymy1\",\n \"Antonymy1\",\n \"Synonymy2\",\n \"Antonymy2\",\n \"Hypernymy\",\n \"Part\",\n \"Substance\",\n \"Member\",\n \"Agent\",\n \"Instrument\",\n \"Result\",\n ],\n },\n )\n\n only_affirmative: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load affirmative examples for training. Defaults to `False`.\"\n )\n },\n )\n\n only_negative: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load negative examples for training. Defaults to `False`.\"\n )\n },\n )\n\n only_non_distractor: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load non-distractor examples for training. Defaults to `False`.\"\n )\n },\n )\n\n only_distractor: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load distractor examples for training. 
Defaults to `False`.\"\n )\n },\n )" }, { "identifier": "ModelArguments", "path": "config.py", "snippet": "class ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.\n \"\"\"\n\n model_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The local path or huggingface hub name of the model and tokenizer to use.\"\n },\n )\n\n torch_dtype: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"Override the default `torch.dtype` and load the model under this\"\n \" dtype. If `auto` is passed, the dtype will be automatically derived\"\n \" from the model's weights.\"\n ),\n \"choices\": [\"auto\", \"bfloat16\", \"float16\", \"float32\"],\n },\n )\n\n use_lora: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to use LoRA. If True, the model will be trained with LoRA: https://arxiv.org/abs/2106.09685\"\n )\n },\n )\n\n quantization: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"Whether to use '4' or '8' bit quantization. Requires bitsandbytes library:\"\n \" https://github.com/TimDettmers/bitsandbytes\"\n )\n },\n )\n lora_weights_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"If the model has been trained with LoRA, \"\n \"path or huggingface hub name or local path to the pretrained weights.\"\n )\n },\n )\n\n lora_r: Optional[int] = field(\n default=8,\n metadata={\"help\": \"Lora attention dimension.\"},\n )\n\n lora_alpha: Optional[float] = field(\n default=16,\n metadata={\"help\": \"The alpha parameter for Lora scaling.\"},\n )\n lora_dropout: Optional[float] = field(\n default=0.05,\n metadata={\"help\": \"The dropout probability for Lora layers.\"},\n )\n\n lora_target_modules: Optional[List[str]] = field(\n default_factory=list,\n metadata={\n \"help\": (\n \"The target modules to which LoRA will be applied. If not specified, We\"\n \" will use the default modules for the model in huggingface PEFT library.\"\n )\n },\n )\n\n conversation_template: str = field(\n default=None,\n metadata={\n \"help\": (\n \"The config template to use to generate conversations. See \"\n \"https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py for more details\"\n )\n },\n )\n\n add_bos_token: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to add the BOS token to the beginning of the prompt (Encoder-only models). Defaults to False.\"\n )\n },\n )\n\n use_flash_attention: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to use the FlashAttention. If True, we will use FlashAttention. Be careful, not all models \"\n \"support FlashAttention. See https://github.com/huggingface/transformers/issues/26350. 
\"\n \"Defaults to False.\"\n )\n },\n )" }, { "identifier": "get_optimizer", "path": "optimizer.py", "snippet": "def get_optimizer(training_args: Seq2SeqTrainingArguments, model: PreTrainedModel):\n decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)\n decay_parameters = [name for name in decay_parameters if \"bias\" not in name]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if (n in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": training_args.weight_decay,\n },\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if (n not in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n\n optimizer_kwargs = {\"lr\": training_args.learning_rate}\n\n adam_kwargs = {\n \"betas\": (training_args.adam_beta1, training_args.adam_beta2),\n \"eps\": training_args.adam_epsilon,\n }\n if training_args.optim == OptimizerNames.ADAFACTOR:\n from transformers.optimization import Adafactor\n\n optimizer_cls = Adafactor\n optimizer_kwargs.update({\"scale_parameter\": False, \"relative_step\": False})\n elif training_args.optim == OptimizerNames.ADAMW_HF:\n from transformers.optimization import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n elif training_args.optim in [\n OptimizerNames.ADAMW_TORCH,\n OptimizerNames.ADAMW_TORCH_FUSED,\n ]:\n from torch.optim import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n if training_args.optim == OptimizerNames.ADAMW_TORCH_FUSED:\n optimizer_kwargs.update({\"fused\": True})\n elif training_args.optim == OptimizerNames.ADAMW_TORCH_XLA:\n try:\n from torch_xla.amp.syncfree import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\"Trainer failed to import syncfree AdamW from torch_xla.\")\n elif training_args.optim == OptimizerNames.ADAMW_APEX_FUSED:\n try:\n from apex.optimizers import FusedAdam\n\n optimizer_cls = FusedAdam\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\n \"Trainer tried to instantiate apex FusedAdam but apex is not installed!\"\n )\n elif training_args.optim in [\n OptimizerNames.ADAMW_BNB,\n OptimizerNames.ADAMW_8BIT,\n OptimizerNames.PAGED_ADAMW,\n OptimizerNames.PAGED_ADAMW_8BIT,\n OptimizerNames.LION,\n OptimizerNames.LION_8BIT,\n OptimizerNames.PAGED_LION,\n OptimizerNames.PAGED_LION_8BIT,\n ]:\n try:\n from bitsandbytes.optim import AdamW, Lion\n\n is_paged = False\n optim_bits = 32\n optimizer_cls = None\n additional_optim_kwargs = adam_kwargs\n if \"paged\" in training_args.optim:\n is_paged = True\n if \"8bit\" in training_args.optim:\n optim_bits = 8\n if \"adam\" in training_args.optim:\n optimizer_cls = AdamW\n elif \"lion\" in training_args.optim:\n optimizer_cls = Lion\n additional_optim_kwargs = {\n \"betas\": (training_args.adam_beta1, training_args.adam_beta2)\n }\n\n bnb_kwargs = {\"is_paged\": is_paged, \"optim_bits\": optim_bits}\n optimizer_kwargs.update(additional_optim_kwargs)\n optimizer_kwargs.update(bnb_kwargs)\n except ImportError:\n raise ValueError(\n \"Trainer tried to instantiate bnb optimizer but bnb is not installed!\"\n )\n elif training_args.optim == OptimizerNames.ADAMW_BNB:\n try:\n from bitsandbytes.optim import Adam8bit\n\n optimizer_cls = Adam8bit\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\n \"Trainer tried to instantiate bnb Adam8bit but bnb is not installed!\"\n )\n elif training_args.optim == 
OptimizerNames.ADAMW_ANYPRECISION:\n raise NotImplementedError(\"AdamWAnyprecision is not supported\")\n elif training_args.optim == OptimizerNames.SGD:\n optimizer_cls = torch.optim.SGD\n elif training_args.optim == OptimizerNames.ADAGRAD:\n optimizer_cls = torch.optim.Adagrad\n else:\n raise ValueError(\n f\"Trainer cannot instantiate unsupported optimizer: {training_args.optim}\"\n )\n\n optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)\n if optimizer_cls.__name__ == \"Adam8bit\":\n import bitsandbytes\n\n manager = bitsandbytes.optim.GlobalOptimManager.get_instance()\n\n skipped = 0\n for module in model.modules():\n if isinstance(module, nn.Embedding):\n skipped += sum(\n {p.data_ptr(): p.numel() for p in module.parameters()}.values()\n )\n print(f\"skipped {module}: {skipped / 2 ** 20}M params\")\n manager.register_module_override(module, \"weight\", {\"optim_bits\": 32})\n print(f\"bitsandbytes: will optimize {module} in fp32\")\n print(f\"skipped: {skipped / 2 ** 20}M params\")\n\n return optimizer" } ]
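The `load_model` snippet above chains `BitsAndBytesConfig`, `prepare_model_for_kbit_training` and `get_peft_model` whenever 4-bit quantization is combined with LoRA. A minimal sketch of that loading path, assuming a generic causal-LM checkpoint; the model name, target modules and LoRA hyperparameters below are placeholders rather than values taken from the snippet:

# Illustrative only: checkpoint name, target modules and LoRA hyperparameters
# are placeholder assumptions, not values from the snippet above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig, TaskType, get_peft_model, prepare_model_for_kbit_training

model_name = "huggyllama/llama-7b"  # placeholder checkpoint

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
)

# Casts norm/embedding layers for stability and registers gradient-checkpointing hooks
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)

lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "v_proj"],  # placeholder target modules
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()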
from load_model import load_model
from dataset import get_dataloader
from evaluate import evaluate
from config import DataTrainingArguments, ModelArguments
from transformers import (
    HfArgumentParser,
    Seq2SeqTrainingArguments,
    set_seed,
    get_scheduler,
)
from tqdm import tqdm
from accelerate import Accelerator, find_executable_batch_size
from typing import List
from optimizer import get_optimizer
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from transformers.modeling_utils import unwrap_model
import torch
import os
import wandb
import gc
import json
import math
import sys
import logging
10,903
optimizer = get_optimizer(training_args=training_args, model=model) lr_scheduler = get_scheduler( name=training_args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=int(training_args.warmup_ratio * max_train_steps), num_training_steps=max_train_steps, ) model, optimizer, train_dataloader = accelerator.prepare( model, optimizer, train_dataloader ) if dev_dataloader is not None: dev_dataloader = accelerator.prepare(dev_dataloader) completed_steps = 0 best_epoch_metric: float = -1 validation_dir: str = os.path.join(training_args.output_dir, "val_logs") os.makedirs(validation_dir, exist_ok=True) running_loss = 0 num_batches = 0 first = True progress_bar = tqdm( range(max_train_steps), disable=not accelerator.is_local_main_process, ascii=True, desc="Training", ) for epoch in range(int(training_args.num_train_epochs)): model.train() for step, batch in enumerate(train_dataloader): ### DEBUG ### if first and accelerator.is_main_process: decodeable_inputs = batch.input_ids.clone() decodeable_inputs[ decodeable_inputs == -100 ] = tokenizer.pad_token_id model_inputs = "\n".join( tokenizer.batch_decode( decodeable_inputs, skip_special_tokens=False, clean_up_tokenization_spaces=False, ) ) decodeable_labels = batch.labels.clone() decodeable_labels[ decodeable_labels == -100 ] = tokenizer.pad_token_id labels = "\n".join( tokenizer.batch_decode( decodeable_labels, skip_special_tokens=False, clean_up_tokenization_spaces=False, ) ) print(f"*** Sample of batch 0 ***") print(f"-- Model inputs --\n{model_inputs}") print(f"-- Labels --\n{labels}") print(f"*** End of sample ***\n") first = False loss = compute_loss(model=model, inputs=batch, return_outputs=False) running_loss += loss.item() loss = loss / training_args.gradient_accumulation_steps accelerator.backward(loss) num_batches += 1 if ( step % training_args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1 ): optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if ( accelerator.is_local_main_process and completed_steps > 0 and (completed_steps % 10 == 0) ): wandb.log( { "Train/Loss": loss.item(), "Train/Running Loss": loss.item() / num_batches, "Train/Learning Rate": optimizer.param_groups[0]["lr"], "epoch": epoch, "step": completed_steps, } ) if ( training_args.eval_steps is not None and completed_steps % training_args.eval_steps == 0 and dev_dataloader is not None ): gen_predictions( model=model, tokenizer=tokenizer, true_tokens_ids=true_tokens_ids, false_tokens_ids=false_tokens_ids, dataloader=dev_dataloader, output_path=os.path.join( validation_dir, f"step_{completed_steps}.preds", ), accelerator=accelerator, predict_with_generate=training_args.predict_with_generate, ) if accelerator.is_main_process:
def clean_cache(): """Clean cache to avoid memory leak. This fixes this issue: https://github.com/huggingface/transformers/issues/22801""" print(f"Cleaning GPU memory. Current memory usage: {torch.cuda.memory_allocated()}") torch.cuda.empty_cache() gc.collect() torch.cuda.empty_cache() print(f"GPU memory usage after cleaning: {torch.cuda.memory_allocated()}") def compute_loss(model, inputs, return_outputs=False): """ How the loss is computed by Trainer. By default, all models return the loss in the first element. Subclass and override for custom behavior. """ if "labels" in inputs: labels = inputs.pop("labels") else: raise ValueError("You should supply a labels key to compute the loss") if "loss_weight_mask" in inputs: loss_weight_mask = inputs.pop("loss_weight_mask") else: raise ValueError("You should supply a loss_weight_mask key to compute the loss") if unwrap_model(model).config.is_encoder_decoder: outputs = model(labels=labels, **inputs) else: outputs = model(**inputs) logits = outputs["logits"] if isinstance(outputs, dict) else outputs[0] model_name = unwrap_model(model)._get_name() if ( model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values() or model_name == "PeftModelForCausalLM" ): logits = logits[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() loss_weight_mask = loss_weight_mask[..., 1:].contiguous() logits = logits.view(-1, logits.size(-1)) labels = labels.view(-1) loss_weight_mask = loss_weight_mask.view(-1) loss_fct = torch.nn.CrossEntropyLoss(reduction="none", ignore_index=-100) loss = loss_fct(logits, labels) loss = torch.sum(loss * loss_weight_mask) / torch.sum(loss_weight_mask) return (loss, outputs) if return_outputs else loss def gen_predictions( model, tokenizer, true_tokens_ids: List[int], false_tokens_ids: List[int], dataloader, output_path, accelerator, print_first=False, predict_with_generate=False, return_scores=False, ): if predict_with_generate and return_scores: raise ValueError( "return_scores is not supported when predict_with_generate is True" ) model.eval() with torch.no_grad(): samples_seen: int = 0 yes_id = true_tokens_ids[0] no_id = false_tokens_ids[0] all_preds = [] all_scores = [] first = True for step, batch in enumerate( tqdm(dataloader, f"Inference on {os.path.basename(output_path)}") ): if print_first and accelerator.is_local_main_process: ### DEBUG ### if print_first and first and accelerator.is_main_process: decodeable_inputs = batch.input_ids.clone() decodeable_inputs[ decodeable_inputs == -100 ] = tokenizer.pad_token_id model_inputs = "\n".join( tokenizer.batch_decode( decodeable_inputs, skip_special_tokens=False, clean_up_tokenization_spaces=False, ) ) print(f"*** Sample of batch 0 ***") print(f"-- Model inputs --\n{model_inputs}") print(f"*** End of sample ***\n") first = False if not predict_with_generate: if not model.config.is_encoder_decoder: logits = model( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], ).logits else: encoder_output = model.get_encoder()( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], ) decoder_args = { "attention_mask": batch["attention_mask"], "use_cache": False, "encoder_outputs": encoder_output, } gen_inputs = model.prepare_inputs_for_generation( input_ids=torch.tensor( [[tokenizer.pad_token_id]] * len(batch["input_ids"]) ).to(batch["input_ids"].device), **decoder_args, ) logits = model( **gen_inputs, ).logits logits = logits[:, -1, :] logits = torch.nn.functional.softmax(logits, dim=-1) logits = logits[:, [yes_id, no_id]] logits = logits[:, 0] / 
(logits[:, 0] + logits[:, 1]) preds = logits > 0.5 preds = accelerator.gather(preds).cpu().tolist() logits = accelerator.gather(logits).cpu().tolist() if accelerator.is_local_main_process: if accelerator.num_processes > 1: # Remove duplicated in last batch if we are in a distributed setting if step == len(dataloader) - 1: preds = preds[: (len(dataloader.dataset) - samples_seen)] logits = logits[: (len(dataloader.dataset) - samples_seen)] else: samples_seen += len(batch) all_preds.extend(preds) all_scores.extend(logits) else: preds = model.generate( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], max_new_tokens=6, ) preds = accelerator.gather( accelerator.pad_across_processes( preds, dim=1, pad_index=tokenizer.pad_token_id, ) ).cpu() inputs_ids = accelerator.gather( accelerator.pad_across_processes( batch["input_ids"], dim=1, pad_index=tokenizer.pad_token_id, ) ).cpu() preds = preds[:, len(inputs_ids[0]) :] if accelerator.is_local_main_process: if accelerator.num_processes > 1: # Remove duplicated in last batch if we are in a distributed setting if step == len(dataloader) - 1: preds = preds[: (len(dataloader.dataset) - samples_seen)] else: samples_seen += len(batch) preds = tokenizer.batch_decode(preds, skip_special_tokens=True) # print(preds) for pred in preds: pred = pred.lower() if "true" in pred: all_preds.append(True) else: all_preds.append(False) if accelerator.is_local_main_process: with open(output_path, "w", encoding="utf8") as f: for pred in all_preds if not return_scores else all_scores: print(pred, file=f) if not return_scores: json_dataset = dataloader.dataset.get_jsonl() assert len(json_dataset) == len(all_preds) with open( os.path.splitext(output_path)[0] + ".jsonl", "w", encoding="utf8" ) as f: for json_line, pred in zip(json_dataset, all_preds): json_line["prediction"] = bool(pred) print(json.dumps(json_line, ensure_ascii=False), file=f) model.train() def main( model_args: ModelArguments, data_args: DataTrainingArguments, training_args: Seq2SeqTrainingArguments, ): assert ( training_args.do_train or training_args.do_predict ), "You must specify do_train or do_predict" assert not (training_args.do_train and data_args.do_predict_full_dataset), ( "You cannot do both training and predict_full_dataset, " "as the model will be evaluated on the full dataset, which" " includes the training set." 
) logging.basicConfig(level=logging.INFO) accelerator = Accelerator() print(f"Accelerator State: {accelerator.state}") set_seed(training_args.seed) if training_args.do_train: model, tokenizer = load_model( inference=False, model_weights_name_or_path=model_args.model_name_or_path, lora_weights_name_or_path=model_args.lora_weights_name_or_path, quantization=model_args.quantization, use_lora=model_args.use_lora, lora_target_modules=model_args.lora_target_modules, torch_dtype=model_args.torch_dtype, force_auto_device_map=data_args.force_auto_device_map, use_flash_attention=model_args.use_flash_attention, use_gradient_checkpointing=model_args.use_lora, ) true_tokens_ids = tokenizer.encode("True", add_special_tokens=False) false_tokens_ids = tokenizer.encode("False", add_special_tokens=False) train_dataloader = get_dataloader( tokenizer=tokenizer, split="train", is_encoder_decoder=model.config.is_encoder_decoder, max_length=data_args.max_seq_length, conv_template=model_args.conversation_template, batch_size=training_args.per_device_train_batch_size, prompt_loss_weight=data_args.prompt_loss_weight, add_bos_token=model_args.add_bos_token, pattern=data_args.pattern, only_negative=data_args.only_negative, only_affirmative=data_args.only_affirmative, only_distractor=data_args.only_non_distractor, only_non_distractor=data_args.only_non_distractor, ) dev_dataloader = None if training_args.do_eval: dev_dataloader = get_dataloader( tokenizer=tokenizer, split="validation", is_encoder_decoder=model.config.is_encoder_decoder, max_length=data_args.max_seq_length, conv_template=model_args.conversation_template, batch_size=training_args.per_device_train_batch_size, prompt_loss_weight=data_args.prompt_loss_weight, add_bos_token=model_args.add_bos_token, pattern=data_args.pattern, only_negative=data_args.only_negative, only_affirmative=data_args.only_affirmative, only_distractor=data_args.only_non_distractor, only_non_distractor=data_args.only_non_distractor, ) if accelerator.is_main_process: wandb.init( project="ThisIsNotADataset", name=f"{os.path.basename(training_args.output_dir)}", config=vars(training_args), ) num_update_steps_per_epoch = math.ceil( len(train_dataloader) / training_args.gradient_accumulation_steps ) max_train_steps = int( training_args.num_train_epochs * num_update_steps_per_epoch ) optimizer = get_optimizer(training_args=training_args, model=model) lr_scheduler = get_scheduler( name=training_args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=int(training_args.warmup_ratio * max_train_steps), num_training_steps=max_train_steps, ) model, optimizer, train_dataloader = accelerator.prepare( model, optimizer, train_dataloader ) if dev_dataloader is not None: dev_dataloader = accelerator.prepare(dev_dataloader) completed_steps = 0 best_epoch_metric: float = -1 validation_dir: str = os.path.join(training_args.output_dir, "val_logs") os.makedirs(validation_dir, exist_ok=True) running_loss = 0 num_batches = 0 first = True progress_bar = tqdm( range(max_train_steps), disable=not accelerator.is_local_main_process, ascii=True, desc="Training", ) for epoch in range(int(training_args.num_train_epochs)): model.train() for step, batch in enumerate(train_dataloader): ### DEBUG ### if first and accelerator.is_main_process: decodeable_inputs = batch.input_ids.clone() decodeable_inputs[ decodeable_inputs == -100 ] = tokenizer.pad_token_id model_inputs = "\n".join( tokenizer.batch_decode( decodeable_inputs, skip_special_tokens=False, clean_up_tokenization_spaces=False, ) ) decodeable_labels = 
batch.labels.clone() decodeable_labels[ decodeable_labels == -100 ] = tokenizer.pad_token_id labels = "\n".join( tokenizer.batch_decode( decodeable_labels, skip_special_tokens=False, clean_up_tokenization_spaces=False, ) ) print(f"*** Sample of batch 0 ***") print(f"-- Model inputs --\n{model_inputs}") print(f"-- Labels --\n{labels}") print(f"*** End of sample ***\n") first = False loss = compute_loss(model=model, inputs=batch, return_outputs=False) running_loss += loss.item() loss = loss / training_args.gradient_accumulation_steps accelerator.backward(loss) num_batches += 1 if ( step % training_args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1 ): optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if ( accelerator.is_local_main_process and completed_steps > 0 and (completed_steps % 10 == 0) ): wandb.log( { "Train/Loss": loss.item(), "Train/Running Loss": loss.item() / num_batches, "Train/Learning Rate": optimizer.param_groups[0]["lr"], "epoch": epoch, "step": completed_steps, } ) if ( training_args.eval_steps is not None and completed_steps % training_args.eval_steps == 0 and dev_dataloader is not None ): gen_predictions( model=model, tokenizer=tokenizer, true_tokens_ids=true_tokens_ids, false_tokens_ids=false_tokens_ids, dataloader=dev_dataloader, output_path=os.path.join( validation_dir, f"step_{completed_steps}.preds", ), accelerator=accelerator, predict_with_generate=training_args.predict_with_generate, ) if accelerator.is_main_process:
results = evaluate(
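The `compute_loss` function in the code above down-weights prompt tokens through a `loss_weight_mask`, matching the `prompt_loss_weight` behaviour documented in `get_dataloader`. A stripped-down sketch of that weighted cross-entropy for a decoder-only model, with toy tensor shapes assumed purely for illustration:

# Minimal sketch of the prompt-weighted loss used by compute_loss above.
# The shapes and the 0.05/0.95 weight split are illustrative assumptions.
import torch

def weighted_causal_lm_loss(logits, labels, loss_weight_mask):
    # Shift so that tokens < n predict token n (decoder-only models)
    logits = logits[..., :-1, :].contiguous()
    labels = labels[..., 1:].contiguous()
    loss_weight_mask = loss_weight_mask[..., 1:].contiguous()

    loss_fct = torch.nn.CrossEntropyLoss(reduction="none", ignore_index=-100)
    per_token = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))

    weights = loss_weight_mask.view(-1)
    return torch.sum(per_token * weights) / torch.sum(weights)

# Toy usage: batch of 1, sequence of 4 tokens, vocabulary of 10
logits = torch.randn(1, 4, 10)
labels = torch.randint(0, 10, (1, 4))
weights = torch.tensor([[0.05, 0.05, 0.95, 0.95]])  # prompt tokens down-weighted
print(weighted_causal_lm_loss(logits, labels, weights))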
2
2023-10-18 10:24:48+00:00
16k
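The `evaluate` scorer in this record's context reads one JSON object per line with the fields named in its docstring ('pattern', 'negation_type', 'semantic_type', 'syntactic_scope', 'isDistractor', 'label', 'sentence', 'prediction'). A hypothetical prediction line, with every value invented for illustration:

# Hypothetical example of one line in the predictions file read by evaluate();
# all field values below are invented for illustration.
import json

record = {
    "pattern_id": 0,
    "pattern": "Hypernymy",
    "test_id": 0,
    "negation_type": "verbal",
    "semantic_type": "hypernymy",
    "syntactic_scope": "clausal",
    "isDistractor": False,
    "label": True,
    "sentence": "A dog is an animal.",
    "prediction": True,
}

with open("predictions.jsonl", "w", encoding="utf8") as f:
    print(json.dumps(record, ensure_ascii=False), file=f)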
Glasgow-AI4BioMed/GenKIE
tasks/pretrain_tasks/unify_task.py
[ { "identifier": "OFATask", "path": "tasks/ofa_task.py", "snippet": "class OFATask(FairseqTask):\n def __init__(self, cfg: OFAConfig, src_dict, tgt_dict):\n super().__init__(cfg)\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n\n @classmethod\n def setup_task(cls, cfg: DictConfig, **kwargs):\n \"\"\"Setup the task.\"\"\"\n\n # load dictionaries\n src_dict = cls.load_dictionary(\n os.path.join(cfg.bpe_dir, \"dict.txt\")\n )\n tgt_dict = cls.load_dictionary(\n os.path.join(cfg.bpe_dir, \"dict.txt\")\n )\n src_dict.add_symbol(\"<mask>\")\n tgt_dict.add_symbol(\"<mask>\")\n for i in range(cfg.code_dict_size):\n src_dict.add_symbol(\"<code_{}>\".format(i))\n tgt_dict.add_symbol(\"<code_{}>\".format(i))\n # quantization\n for i in range(cfg.num_bins):\n src_dict.add_symbol(\"<bin_{}>\".format(i))\n tgt_dict.add_symbol(\"<bin_{}>\".format(i))\n\n src_dict.add_symbol(\"<dsep>\")\n tgt_dict.add_symbol(\"<dsep>\")\n\n # self.sep_index = self.add_symbol('50257')\n src_dict.add_symbol(\"50257\")\n tgt_dict.add_symbol(\"50257\")\n\n logger.info(\"source dictionary: {} types\".format(len(src_dict)))\n logger.info(\"target dictionary: {} types\".format(len(tgt_dict)))\n return cls(cfg, src_dict, tgt_dict)\n\n def get_batch_iterator(\n self,\n dataset,\n max_tokens=None,\n max_sentences=None,\n max_positions=None,\n ignore_invalid_inputs=False,\n required_batch_size_multiple=1,\n seed=1,\n num_shards=1,\n shard_id=0,\n num_workers=0,\n epoch=1,\n data_buffer_size=0,\n disable_iterator_cache=False,\n ):\n assert isinstance(dataset, FairseqDataset)\n\n # initialize the dataset with the correct starting epoch\n dataset.set_epoch(epoch)\n\n # create mini-batches with given size constraints\n batch_sampler = [\n [j for j in range(i, min(i + max_sentences, len(dataset)))]\n for i in range(0, len(dataset), max_sentences)\n ]\n total_row_count = dataset.dataset.get_total_row_count()\n num_batches = math.ceil(math.ceil(total_row_count / num_shards) / max_sentences)\n if len(batch_sampler) < num_batches:\n batch_sampler.append([])\n\n # return a reusable, sharded iterator\n epoch_iter = iterators.EpochBatchIterator(\n dataset=dataset,\n collate_fn=dataset.collater,\n batch_sampler=batch_sampler,\n seed=seed,\n num_shards=1,\n shard_id=0,\n num_workers=num_workers,\n epoch=epoch,\n buffer_size=data_buffer_size\n )\n\n return epoch_iter\n\n def build_model(self, cfg: FairseqDataclass):\n model = super().build_model(cfg)\n if self.cfg.bpe == 'bert': # self.cfg.bpe=None\n bpe_dict = {\n \"_name\": \"bert\",\n \"bpe_vocab_file\": os.path.join(self.cfg.bpe_dir, \"vocab.txt\"),\n \"bpe_cased\": False\n }\n bpe_dict = DictConfig(bpe_dict)\n self.bpe = self.build_bpe(bpe_dict)\n else:\n bpe_dict = {\n \"_name\": \"gpt2\",\n \"gpt2_encoder_json\": os.path.join(self.cfg.bpe_dir, \"encoder.json\"),\n \"gpt2_vocab_bpe\": os.path.join(self.cfg.bpe_dir, \"vocab.bpe\")\n }\n bpe_dict = DictConfig(bpe_dict)\n self.bpe = self.build_bpe(bpe_dict)\n return model\n\n def build_generator(\n self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None, prefix_allowed_tokens_fn=None,\n ):\n \"\"\"\n Build a :class:`~fairseq.SequenceGenerator` instance for this\n task.\n\n Args:\n models (List[~fairseq.models.FairseqModel]): ensemble of models\n args (fairseq.dataclass.configs.GenerationConfig):\n configuration object (dataclass) for generation\n extra_gen_cls_kwargs (Dict[str, Any]): extra options to pass\n through to SequenceGenerator\n prefix_allowed_tokens_fn (Callable[[int, torch.Tensor], List[int]]):\n If provided, this 
function constrains the beam search to\n allowed tokens only at each step. The provided function\n should take 2 arguments: the batch ID (`batch_id: int`)\n and a unidimensional tensor of token ids (`inputs_ids:\n torch.Tensor`). It has to return a `List[int]` with the\n allowed tokens for the next generation step conditioned\n on the previously generated tokens (`inputs_ids`) and\n the batch ID (`batch_id`). This argument is useful for\n constrained generation conditioned on the prefix, as\n described in \"Autoregressive Entity Retrieval\"\n (https://arxiv.org/abs/2010.00904) and\n https://github.com/facebookresearch/GENRE.\n \"\"\"\n if getattr(args, \"score_reference\", False):\n from fairseq.sequence_scorer import SequenceScorer\n\n return SequenceScorer(\n self.target_dictionary,\n compute_alignment=getattr(args, \"print_alignment\", False),\n )\n\n from fairseq.sequence_generator import (\n # SequenceGenerator,\n SequenceGeneratorWithAlignment,\n )\n from models.sequence_generator import SequenceGenerator\n\n # Choose search strategy. Defaults to Beam Search.\n sampling = getattr(args, \"sampling\", False)\n sampling_topk = getattr(args, \"sampling_topk\", -1)\n sampling_topp = getattr(args, \"sampling_topp\", -1.0)\n diverse_beam_groups = getattr(args, \"diverse_beam_groups\", -1)\n diverse_beam_strength = getattr(args, \"diverse_beam_strength\", 0.5)\n match_source_len = getattr(args, \"match_source_len\", False)\n diversity_rate = getattr(args, \"diversity_rate\", -1)\n constrained = getattr(args, \"constraints\", False)\n if prefix_allowed_tokens_fn is None:\n prefix_allowed_tokens_fn = getattr(args, \"prefix_allowed_tokens_fn\", None)\n if (\n sum(\n int(cond)\n for cond in [\n sampling,\n diverse_beam_groups > 0,\n match_source_len,\n diversity_rate > 0,\n ]\n )\n > 1\n ):\n raise ValueError(\"Provided Search parameters are mutually exclusive.\")\n assert sampling_topk < 0 or sampling, \"--sampling-topk requires --sampling\"\n assert sampling_topp < 0 or sampling, \"--sampling-topp requires --sampling\"\n\n if sampling:\n search_strategy = search.Sampling(\n self.target_dictionary, sampling_topk, sampling_topp\n )\n elif diverse_beam_groups > 0:\n search_strategy = search.DiverseBeamSearch(\n self.target_dictionary, diverse_beam_groups, diverse_beam_strength\n )\n elif match_source_len:\n # this is useful for tagging applications where the output\n # length should match the input length, so we hardcode the\n # length constraints for simplicity\n search_strategy = search.LengthConstrainedBeamSearch(\n self.target_dictionary,\n min_len_a=1,\n min_len_b=0,\n max_len_a=1,\n max_len_b=0,\n )\n elif diversity_rate > -1:\n search_strategy = search.DiverseSiblingsSearch(\n self.target_dictionary, diversity_rate\n )\n elif constrained:\n search_strategy = search.LexicallyConstrainedBeamSearch(\n self.target_dictionary, args.constraints\n )\n elif prefix_allowed_tokens_fn:\n search_strategy = search.PrefixConstrainedBeamSearch(\n self.target_dictionary, prefix_allowed_tokens_fn\n )\n else:\n search_strategy = search.BeamSearch(self.target_dictionary)\n\n extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}\n if seq_gen_cls is None:\n if getattr(args, \"print_alignment\", False):\n seq_gen_cls = SequenceGeneratorWithAlignment\n extra_gen_cls_kwargs[\"print_alignment\"] = args.print_alignment\n else:\n seq_gen_cls = SequenceGenerator\n\n return seq_gen_cls(\n models,\n self.target_dictionary,\n beam_size=getattr(args, \"beam\", 5),\n max_len_a=getattr(args, \"max_len_a\", 0),\n 
max_len_b=getattr(args, \"max_len_b\", 200),\n min_len=getattr(args, \"min_len\", 1),\n normalize_scores=(not getattr(args, \"unnormalized\", False)),\n len_penalty=getattr(args, \"lenpen\", 1),\n unk_penalty=getattr(args, \"unkpen\", 0),\n temperature=getattr(args, \"temperature\", 1.0),\n match_source_len=getattr(args, \"match_source_len\", False),\n no_repeat_ngram_size=getattr(args, \"no_repeat_ngram_size\", 0),\n search_strategy=search_strategy,\n constraint_range=self.cfg.constraint_range,\n **extra_gen_cls_kwargs,\n )\n\n def train_step(\n self, sample, model, criterion, optimizer, update_num, ignore_grad=False, **extra_kwargs\n ):\n \"\"\"\n Do forward and backward, and return the loss as computed by *criterion*\n for the given *model* and *sample*.\n\n Args:\n sample (dict): the mini-batch. The format is defined by the\n :class:`~fairseq.data.FairseqDataset`.\n model (~fairseq.models.BaseFairseqModel): the model\n criterion (~fairseq.criterions.FairseqCriterion): the criterion\n optimizer (~fairseq.optim.FairseqOptimizer): the optimizer\n update_num (int): the current update\n ignore_grad (bool): multiply loss by 0 if this is set to True\n\n Returns:\n tuple:\n - the loss\n - the sample size, which is used as the denominator for the\n gradient\n - logging outputs to display while training\n \"\"\"\n model.train()\n model.set_num_updates(update_num)\n with torch.autograd.profiler.record_function(\"forward\"):\n with torch.cuda.amp.autocast(enabled=(isinstance(optimizer, AMPOptimizer))):\n loss, sample_size, logging_output = criterion(model, sample, update_num=update_num)\n if ignore_grad:\n loss *= 0\n with torch.autograd.profiler.record_function(\"backward\"):\n optimizer.backward(loss)\n return loss, sample_size, logging_output\n\n def max_positions(self):\n \"\"\"Return the max sentence length allowed by the task.\"\"\"\n return (self.cfg.max_source_positions, self.cfg.max_target_positions)\n\n @property\n def source_dictionary(self):\n \"\"\"Return the source :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.src_dict\n\n @property\n def target_dictionary(self):\n \"\"\"Return the target :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.tgt_dict" }, { "identifier": "OFAConfig", "path": "tasks/ofa_task.py", "snippet": "class OFAConfig(FairseqDataclass):\n data: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"comma separated path to data list, will be iterated upon during epochs \"\n \"in round-robin manner; valid data are always in the last\"\n },\n )\n selected_cols: Optional[str] = field(\n default=None,\n metadata={\"help\": \"selected cols\"},\n )\n bpe: Optional[str] = field(\n default='gpt2',\n metadata={\"help\": \"which bpe to use\"},\n )\n bpe_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"bpe dir\"},\n )\n max_source_positions: int = field(\n default=1024, metadata={\"help\": \"max number of tokens in the source sequence\"}\n )\n max_target_positions: int = field(\n default=1024, metadata={\"help\": \"max number of tokens in the target sequence\"}\n )\n max_src_length: int = field(\n default=128, metadata={\"help\": \"the maximum src sequence length\"}\n )\n max_tgt_length: int = field(\n default=30, metadata={\"help\": \"the maximum target sequence length\"}\n )\n\n code_dict_size: int = field(\n default=8192, metadata={\"help\": \"code dict size\"}\n )\n patch_image_size: int = field(\n default=480, metadata={\"help\": \"patch image size\"}\n )\n orig_patch_image_size: int = field(\n default=256, metadata={\"help\": 
\"patch image size\"}\n )\n num_bins: int = field(\n default=1000, metadata={\"help\": \"number of quantization bins\"}\n )\n\n imagenet_default_mean_and_std: bool = field(\n default=False,\n metadata={\"help\": \"imagenet normalize\"},\n )\n constraint_range: Optional[str] = field(\n default=None,\n metadata={\"help\": \"constraint range\"}\n )" }, { "identifier": "UnifyDataset", "path": "data/pretrain_data/unify_dataset.py", "snippet": "class UnifyDataset(OFADataset):\n def __init__(\n self,\n split,\n dataset,\n bpe,\n src_dict,\n tgt_dict=None,\n max_src_length=128,\n max_tgt_length=30,\n seed=7,\n code_dict_size=8192,\n num_bins=1000,\n patch_image_size=384,\n code_image_size=128,\n pure_text_dataset=None,\n pure_image_dataset=None,\n detection_dataset=None,\n all_object_list=None,\n all_caption_list=None,\n type2ans_dict=None,\n ans2type_dict=None,\n max_image_size=512,\n mask_ratio=0.3,\n random_ratio=0.0,\n keep_ratio=0.0,\n mask_length=\"span-poisson\",\n poisson_lambda=3.0,\n replace_length=1\n ):\n super().__init__(split, dataset, bpe, src_dict, tgt_dict)\n self.max_src_length = max_src_length\n self.max_tgt_length = max_tgt_length\n self.seed = seed\n self.code_dict_size = code_dict_size\n self.num_bins = num_bins\n self.patch_image_size = patch_image_size\n self.code_image_size = code_image_size\n\n self.pure_text_dataset = pure_text_dataset\n self.pure_image_dataset = pure_image_dataset\n self.detection_dataset = detection_dataset\n self.epoch = 0\n\n self.all_object_list = all_object_list\n self.all_caption_list = all_caption_list\n self.type2ans_dict = type2ans_dict\n self.ans2type_dict = ans2type_dict\n\n self.mask_ratio = mask_ratio\n self.random_ratio = random_ratio\n self.keep_ratio = keep_ratio\n self.mask_length = mask_length\n self.poisson_lambda = poisson_lambda\n self.replace_length = replace_length\n if self.replace_length not in [-1, 0, 1]:\n raise ValueError(f\"invalid arg: replace_length={self.replace_length}\")\n if self.mask_length not in [\"subword\", \"word\", \"span-poisson\"]:\n raise ValueError(f\"invalid arg: mask-length={self.mask_length}\")\n if self.mask_length == \"subword\" and self.replace_length not in [0, 1]:\n raise ValueError(f\"if using subwords, use replace-length=1 or 0\")\n\n self.mask_idx = src_dict.index(\"<mask>\")\n self.mask_whole_word = (\n get_whole_word_mask(self.bpe, self.src_dict)\n if self.mask_length != \"subword\"\n else None\n )\n self.mask_span_distribution = None\n if self.mask_length == \"span-poisson\":\n _lambda = self.poisson_lambda\n lambda_to_the_k = 1\n e_to_the_minus_lambda = math.exp(-_lambda)\n k_factorial = 1\n ps = []\n for k in range(0, 128):\n ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)\n lambda_to_the_k *= _lambda\n k_factorial *= k + 1\n if ps[-1] < 0.0000001:\n break\n ps = torch.FloatTensor(ps)\n self.mask_span_distribution = torch.distributions.Categorical(ps)\n\n self.pos_tgt_item = self.encode_text(\" yes\")\n self.neg_tgt_item = self.encode_text(\" no\")\n\n self.mask_left = self.mask_top = int(0.5 * self.code_image_size)\n self.mask_right = self.mask_bottom = int(1.5 * self.code_image_size)\n self.mask_ids = [\n i*self.code_image_size*2+j\n for i in range(self.code_image_size*2) for j in range(self.code_image_size*2)\n if not (self.mask_left <= i < self.mask_right and self.mask_top <= j < self.mask_bottom)\n ]\n\n scales = np.arange(patch_image_size, 481).tolist()\n\n # for image-text pair\n self.patch_resize_transform = transforms.Compose([\n T.RandomResize(scales, 
max_size=672),\n transforms.CenterCrop(patch_image_size),\n RandomAugment(2, 7, isPIL=True, augs=['Identity', 'AutoContrast', 'Equalize', 'Brightness', 'Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n # for pure image\n self.patch_crop_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n # for detection\n self.detection_transform = T.Compose([\n T.RandomHorizontalFlip(),\n T.LargeScaleJitter(output_size=self.code_image_size*2, aug_scale_min=1.0, aug_scale_max=1.5),\n T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], max_image_size=max_image_size)\n ])\n # for visual grounding\n self.visual_grounding_transform = T.Compose([\n T.RandomResize(scales, max_size=672),\n T.ObjectCenterCrop((patch_image_size, patch_image_size)),\n T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], max_image_size=max_image_size)\n ])\n\n def set_epoch(self, epoch, **unused):\n self.epoch = epoch\n\n def get_negative_caption(self, caption, gt_objects):\n prob = random.random()\n if gt_objects is not None and gt_objects != '' and prob > 0.6:\n gt_object = random.choice(gt_objects.strip().split('&&'))\n negative_object = random.choice(self.all_object_list[:-1])\n negative_object = self.all_object_list[-1] if negative_object == gt_object else negative_object\n negative_caption = caption.replace(gt_object, negative_object)\n else:\n negative_caption = random.choice(self.all_caption_list)\n return negative_caption\n\n def get_negative_answer(self, answer, conf):\n prob = random.random()\n if conf > (prob + 0.1) and answer in self.ans2type_dict:\n negative_answer_type = self.ans2type_dict[answer]\n if negative_answer_type == 'how many' and answer.isdigit() and prob > 0.5:\n negative_answer = int(answer) + random.choice([-1, 1]) if answer != 0 else 1\n else:\n negative_answer_list = self.type2ans_dict[negative_answer_type]\n negative_answer = random.choice(negative_answer_list[:-1])\n negative_answer = negative_answer_list[-1] if negative_answer == answer else negative_answer\n return negative_answer\n\n negative_answer_list = self.type2ans_dict['other']\n negative_answer = random.choice(negative_answer_list[:-1])\n negative_answer = negative_answer_list[-1] if negative_answer == answer else negative_answer\n return negative_answer\n\n def process_image_text_pair(self, index):\n uniq_id, image, caption, question, refs, gt_objects, dataset_name, type = self.dataset[index]\n\n image = Image.open(BytesIO(base64.urlsafe_b64decode(image))).convert(\"RGB\")\n patch_image = self.patch_resize_transform(image) if type != 'visual_grounding' else None\n patch_mask = torch.tensor([True])\n conf = torch.tensor([1.0])\n if type == 'caption':\n tgt_caption = self.pre_caption(caption, self.max_tgt_length)\n pos_src_caption = self.pre_caption(caption, self.max_src_length)\n neg_src_caption = self.pre_caption(self.get_negative_caption(caption, gt_objects), self.max_src_length)\n src_item = self.encode_text(\" what does the image describe?\")\n tgt_item = self.encode_text(\" {}\".format(tgt_caption))\n pos_src_item = self.encode_text(' does the image describe \" {} \"?'.format(pos_src_caption))\n neg_src_item = self.encode_text(' does the image describe \" {} \"?'.format(neg_src_caption))\n elif type == 'qa':\n question = self.pre_question(question, self.max_src_length)\n ref_dict = 
{item.split('|!+')[1]: float(item.split('|!+')[0]) for item in refs.split('&&')}\n answer = max(ref_dict, key=ref_dict.get)\n conf = ref_dict[answer]\n src_item = self.encode_text(\" {}\".format(question))\n tgt_item = self.encode_text(\" {}\".format(answer))\n conf = torch.tensor([conf])\n pos_src_item = self.encode_text(' what is the answer to question \" {} \". is \" {} \"?'.format(question, answer))\n neg_src_item = self.encode_text(\n ' what is the answer to question \" {} \". is \" {} \"?'.format(question, self.get_negative_answer(answer, conf))\n )\n elif type == 'visual_grounding':\n conf = torch.tensor([1.0])\n w, h = image.size\n boxes_target = {\"boxes\": [], \"labels\": [], \"area\": [], \"size\": torch.tensor([h, w])}\n x0, y0, x1, y1 = refs.strip().split(',')\n boxes_target[\"boxes\"] = torch.tensor([[float(x0), float(y0), float(x1), float(y1)]])\n boxes_target[\"labels\"] = np.array([0])\n boxes_target[\"area\"] = torch.tensor([(float(x1) - float(x0)) * (float(y1) - float(y0))])\n patch_image, boxes_target = self.visual_grounding_transform(image, boxes_target)\n quant_x0 = \"<bin_{}>\".format(int((boxes_target[\"boxes\"][0][0] * (self.num_bins - 1)).round()))\n quant_y0 = \"<bin_{}>\".format(int((boxes_target[\"boxes\"][0][1] * (self.num_bins - 1)).round()))\n quant_x1 = \"<bin_{}>\".format(int((boxes_target[\"boxes\"][0][2] * (self.num_bins - 1)).round()))\n quant_y1 = \"<bin_{}>\".format(int((boxes_target[\"boxes\"][0][3] * (self.num_bins - 1)).round()))\n region_coord = \"{} {} {} {}\".format(quant_x0, quant_y0, quant_x1, quant_y1)\n src_caption = self.pre_caption(caption, self.max_src_length)\n src_item = self.encode_text(' which region does the text \" {} \" describe?'.format(src_caption))\n tgt_item = self.encode_text(region_coord, use_bpe=False)\n else:\n logger.info('type {} is not implemented'.format(type))\n raise NotImplementedError\n\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n pos_src_item = torch.cat([self.bos_item, pos_src_item, self.eos_item]) if type != 'visual_grounding' else None\n neg_src_item = torch.cat([self.bos_item, neg_src_item, self.eos_item]) if type != 'visual_grounding' else None\n\n if type == 'caption' and dataset_name == 'cc12m':\n target_item[:2] = self.src_dict.pad()\n target_item[-1] = self.eos_item\n\n example = {\n \"id\": uniq_id,\n \"source\": src_item,\n \"patch_image\": patch_image,\n \"patch_mask\": patch_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item,\n \"conf\": conf,\n }\n\n examples = [example]\n prob = random.random()\n if type == 'visual_grounding':\n region_example = example.copy()\n region_prefix_item = self.encode_text(' what does the region describe? 
region:')\n region_coord_item = self.encode_text('{}'.format(region_coord), use_bpe=False)\n region_src_item = torch.cat([region_prefix_item, region_coord_item])\n region_tgt_item = self.encode_text(' {}'.format(self.pre_caption(caption, self.max_tgt_length)))\n region_example[\"source\"] = torch.cat([self.bos_item, region_src_item, self.eos_item])\n region_example[\"target\"] = torch.cat([region_tgt_item, self.eos_item])\n region_example[\"prev_output_tokens\"] = torch.cat([self.bos_item, region_tgt_item])\n region_example[\"conf\"] = torch.tensor([1.0])\n examples.append(region_example)\n elif prob >= 0.5 and self.split == 'train':\n pos_example = example.copy()\n pos_example[\"source\"] = pos_src_item\n pos_example[\"target\"] = torch.cat([self.pos_tgt_item, self.eos_item])\n pos_example[\"prev_output_tokens\"] = torch.cat([self.bos_item, self.pos_tgt_item])\n examples.append(pos_example)\n elif self.split == 'train':\n neg_example = example.copy()\n neg_example[\"source\"] = neg_src_item\n neg_example[\"target\"] = torch.cat([self.neg_tgt_item, self.eos_item])\n neg_example[\"prev_output_tokens\"] = torch.cat([self.bos_item, self.neg_tgt_item])\n examples.append(neg_example)\n return examples\n\n def process_pure_text(self, index):\n patch_image = torch.zeros((3, self.code_image_size*2, self.code_image_size*2))\n patch_mask = torch.tensor([False])\n code_mask = torch.tensor([False])\n conf = torch.tensor([2.0])\n\n examples = []\n for _ in range(2):\n uniq_id, text = self.pure_text_dataset[index]\n text = text.strip().lower()\n text_item = self.encode_text(\" {}\".format(text), length=512)\n text_item = text_item[-256:]\n text_item = torch.cat([self.bos_item, text_item, self.eos_item])\n mask_text_item = self.add_whole_word_mask(text_item.clone(), self.mask_ratio)\n prefix_item = self.encode_text(' what is the complete text of \" \"?')\n src_item = torch.cat([prefix_item[:-2], mask_text_item[1:-1], prefix_item[-2:]])\n tgt_item = text_item[1:-1]\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n example = {\n \"id\": uniq_id,\n \"source\": src_item,\n \"patch_image\": patch_image,\n \"patch_mask\": patch_mask,\n \"code_mask\": code_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item,\n \"conf\": conf,\n }\n examples.append(example)\n\n return examples\n\n def process_pure_image(self, index):\n image_id, image, code = self.pure_image_dataset[index]\n image = Image.open(BytesIO(base64.urlsafe_b64decode(image))).convert(\"RGB\")\n patch_image = self.patch_crop_transform(image)\n patch_image[:, self.mask_top:self.mask_bottom, self.mask_left:self.mask_right] = 0\n patch_mask = torch.tensor([True])\n src_item = self.encode_text(\" what is the image in the middle part?\")\n image_code = torch.LongTensor([int(num) for num in code.strip().split()])\n tgt_item = image_code + len(self.src_dict) - self.code_dict_size - self.num_bins\n code_mask = torch.tensor([True])\n conf = torch.tensor([2.0])\n\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n\n example = {\n \"id\": image_id,\n \"source\": src_item,\n \"patch_image\": patch_image,\n \"patch_mask\": patch_mask,\n \"code_mask\": code_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item,\n \"conf\": conf,\n }\n return [example]\n\n def 
process_detection(self, index):\n image_id, image, label = self.detection_dataset[index]\n image = Image.open(BytesIO(base64.urlsafe_b64decode(image))).convert(\"RGB\")\n\n w, h = image.size\n boxes_target = {\"boxes\": [], \"labels\": [], \"area\": [], \"size\": torch.tensor([h, w])}\n label_list = label.strip().split('&&')\n for label in label_list:\n x0, y0, x1, y1, cat_id, cat = label.strip().split(',', 5)\n boxes_target[\"boxes\"].append([float(x0), float(y0), float(x1), float(y1)])\n boxes_target[\"labels\"].append(cat)\n boxes_target[\"area\"].append((float(x1) - float(x0)) * (float(y1) - float(y0)))\n boxes_target[\"boxes\"] = torch.tensor(boxes_target[\"boxes\"])\n boxes_target[\"labels\"] = np.array(boxes_target[\"labels\"])\n boxes_target[\"area\"] = torch.tensor(boxes_target[\"area\"])\n\n patch_image, boxes_target = self.detection_transform(image, boxes_target)\n patch_mask = torch.tensor([True])\n code_mask = torch.tensor([False])\n conf = torch.tensor([2.0])\n\n quant_boxes = []\n for i, box in enumerate(boxes_target[\"boxes\"]):\n quant_boxes.extend([\"<bin_{}>\".format(int((pos * (self.num_bins - 1)).round())) for pos in box[:4]])\n quant_boxes.append(self.bpe.encode(' {}'.format(boxes_target[\"labels\"][i])))\n src_item = self.encode_text(' what are the objects in the image?')\n tgt_item = self.encode_text(' '.join(quant_boxes), use_bpe=False)\n\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n\n example = {\n \"id\": image_id,\n \"source\": src_item,\n \"patch_image\": patch_image,\n \"patch_mask\": patch_mask,\n \"code_mask\": code_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item,\n \"conf\": conf,\n }\n return [example]\n\n def __getitem__(self, index):\n with data_utils.numpy_seed(self.seed, self.epoch):\n pair_samples = self.process_image_text_pair(index)\n extra_samples = []\n if self.split == 'train' and self.dataset.data_cnt % 8 == 0:\n extra_samples += self.process_pure_text(0) if self.pure_text_dataset else []\n extra_samples += self.process_pure_image(0) if self.pure_image_dataset else []\n extra_samples += self.process_detection(0) if self.detection_dataset else []\n return pair_samples, extra_samples\n\n def word_starts(self, source):\n if self.mask_whole_word is not None:\n is_word_start = self.mask_whole_word.gather(0, source)\n else:\n is_word_start = torch.ones(source.size())\n is_word_start[0] = 0\n is_word_start[-1] = 0\n return is_word_start\n\n def add_whole_word_mask(self, source, p):\n is_word_start = self.word_starts(source)\n num_to_mask = int(math.ceil(is_word_start.float().sum() * p))\n num_inserts = 0\n if num_to_mask == 0:\n return source\n\n if self.mask_span_distribution is not None:\n lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))\n\n # Make sure we have enough to mask\n cum_length = torch.cumsum(lengths, 0)\n while cum_length[-1] < num_to_mask:\n lengths = torch.cat(\n [\n lengths,\n self.mask_span_distribution.sample(sample_shape=(num_to_mask,)),\n ],\n dim=0,\n )\n cum_length = torch.cumsum(lengths, 0)\n\n # Trim to masking budget\n i = 0\n while cum_length[i] < num_to_mask:\n i += 1\n lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1])\n num_to_mask = i + 1\n lengths = lengths[:num_to_mask]\n\n # Handle 0-length mask (inserts) separately\n lengths = lengths[lengths > 0]\n num_inserts = num_to_mask - lengths.size(0)\n num_to_mask -= num_inserts\n 
if num_to_mask == 0:\n return self.add_insertion_noise(source, num_inserts / source.size(0))\n\n assert (lengths > 0).all()\n else:\n lengths = torch.ones((num_to_mask,)).long()\n assert is_word_start[-1] == 0\n word_starts = is_word_start.nonzero(as_tuple=False)\n indices = word_starts[\n torch.randperm(word_starts.size(0))[:num_to_mask]\n ].squeeze(1)\n mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio\n\n source_length = source.size(0)\n assert source_length - 1 not in indices\n to_keep = torch.ones(source_length, dtype=torch.bool)\n is_word_start[\n -1\n ] = 255 # acts as a long length, so spans don't go over the end of doc\n if self.replace_length == 0:\n to_keep[indices] = 0\n else:\n # keep index, but replace it with [MASK]\n source[indices] = self.mask_idx\n source[indices[mask_random]] = torch.randint(\n 4, len(self.tgt_dict) - self.code_dict_size - self.num_bins, size=(mask_random.sum(),)\n )\n\n if self.mask_span_distribution is not None:\n assert len(lengths.size()) == 1\n assert lengths.size() == indices.size()\n lengths -= 1\n while indices.size(0) > 0:\n assert lengths.size() == indices.size()\n lengths -= is_word_start[indices + 1].long()\n uncompleted = lengths >= 0\n indices = indices[uncompleted] + 1\n mask_random = mask_random[uncompleted]\n lengths = lengths[uncompleted]\n if self.replace_length != -1:\n # delete token\n to_keep[indices] = 0\n else:\n # keep index, but replace it with [MASK]\n source[indices] = self.mask_idx\n source[indices[mask_random]] = torch.randint(\n 4, len(self.tgt_dict) - self.code_dict_size - self.num_bins, size=(mask_random.sum(),)\n )\n else:\n # A bit faster when all lengths are 1\n while indices.size(0) > 0:\n uncompleted = is_word_start[indices + 1] == 0\n indices = indices[uncompleted] + 1\n mask_random = mask_random[uncompleted]\n if self.replace_length != -1:\n # delete token\n to_keep[indices] = 0\n else:\n # keep index, but replace it with [MASK]\n source[indices] = self.mask_idx\n source[indices[mask_random]] = torch.randint(\n 4, len(self.tgt_dict) - self.code_dict_size - self.num_bins, size=(mask_random.sum(),)\n )\n\n assert source_length - 1 not in indices\n\n source = source[to_keep]\n\n if num_inserts > 0:\n source = self.add_insertion_noise(source, num_inserts / source.size(0))\n\n return source\n\n def add_insertion_noise(self, tokens, p):\n if p == 0.0:\n return tokens\n\n num_tokens = len(tokens)\n n = int(math.ceil(num_tokens * p))\n\n noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1\n noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)\n noise_mask[noise_indices] = 1\n result = torch.LongTensor(n + len(tokens)).fill_(-1)\n\n num_random = int(math.ceil(n * self.random_ratio))\n result[noise_indices[num_random:]] = self.mask_idx\n result[noise_indices[:num_random]] = torch.randint(\n low=4, high=len(self.tgt_dict)-self.code_dict_size-self.num_bins, size=(num_random,)\n )\n\n result[~noise_mask] = tokens\n\n assert (result >= 0).all()\n return result\n\n def collater(self, samples, pad_to_length=None):\n \"\"\"Merge samples of different tasks to form two mini-batches.\n Args:\n samples (List[Tuple]): samples to collate\n Returns:\n Tuple[dict]: two mini-batch containing the data of different tasks\n \"\"\"\n\n samples_v1 = [] # containing image-text pairs\n samples_v2 = [] # containing detection data, text data and image data\n for sample_tuple in samples:\n samples_v1 += sample_tuple[0]\n samples_v2 += sample_tuple[1]\n if samples_v2 != []:\n res_v1 = collate(samples_v1, 
pad_idx=self.src_dict.pad(), eos_idx=self.eos)\n res_v2 = collate(samples_v2, pad_idx=self.src_dict.pad(), eos_idx=self.eos)\n return res_v1, res_v2\n else:\n res_v1 = collate(samples_v1, pad_idx=self.src_dict.pad(), eos_idx=self.eos)\n return res_v1" }, { "identifier": "FileDataset", "path": "data/file_dataset.py", "snippet": "class FileDataset:\n def __init__(self, file_path, selected_col_ids=None, dtypes=None, separator=\"\\t\", cached_index=False):\n self.file_path = file_path\n assert os.path.exists(self.file_path), \"Error: The local datafile {} not exists!\".format(self.file_path)\n\n self.separator = separator\n if selected_col_ids is None:\n # default to all fields\n self.selected_col_ids = list(\n range(len(open(self.file_path).readline().rstrip(\"\\n\").split(self.separator))))\n else:\n self.selected_col_ids = [int(col_id) for col_id in selected_col_ids.split(\",\")]\n if dtypes is None:\n # default to str\n self.dtypes = [str for col_id in self.selected_col_ids]\n else:\n self.dtypes = [eval(col_dtype) for col_dtype in dtypes.split(\",\")]\n assert len(self.dtypes) == len(self.selected_col_ids)\n\n self.data_cnt = 0\n try:\n self.slice_id = torch.distributed.get_rank()\n self.slice_count = torch.distributed.get_world_size()\n except Exception:\n self.slice_id = 0\n self.slice_count = 1\n self.cached_index = cached_index\n self._init_seek_index()\n self._reader = self._get_reader()\n print(\"file {} slice_id {} row count {} total row count {}\".format(\n self.file_path, self.slice_id, self.row_count, self.total_row_count)\n )\n\n def _init_seek_index(self):\n if self.cached_index:\n cache_path = \"{}.index\".format(self.file_path)\n assert os.path.exists(cache_path), \"cache file {} not exists!\".format(cache_path)\n self.total_row_count, self.lineid_to_offset = pickle.load(open(cache_path, \"rb\"))\n print(\"local datafile {} slice_id {} use cached row_count and line_idx-to-offset mapping\".format(\n self.file_path, self.slice_id))\n else:\n # make an iteration over the file to get row_count and line_idx-to-offset mapping\n fp = open(self.file_path, \"r\")\n print(\"local datafile {} slice_id {} begin to initialize row_count and line_idx-to-offset mapping\".format(\n self.file_path, self.slice_id))\n self.total_row_count = 0\n offset = 0\n self.lineid_to_offset = []\n for line in fp:\n self.lineid_to_offset.append(offset)\n self.total_row_count += 1\n offset += len(line.encode('utf-8'))\n self._compute_start_pos_and_row_count()\n print(\"local datafile {} slice_id {} finished initializing row_count and line_idx-to-offset mapping\".format(\n self.file_path, self.slice_id))\n\n def _compute_start_pos_and_row_count(self):\n self.row_count = self.total_row_count // self.slice_count\n if self.slice_id < self.total_row_count - self.row_count * self.slice_count:\n self.row_count += 1\n self.start_pos = self.row_count * self.slice_id\n else:\n self.start_pos = self.row_count * self.slice_id + (self.total_row_count - self.row_count * self.slice_count)\n\n def _get_reader(self):\n fp = open(self.file_path, \"r\")\n fp.seek(self.lineid_to_offset[self.start_pos])\n return fp\n\n def _seek(self, offset=0):\n try:\n print(\"slice_id {} seek offset {}\".format(self.slice_id, self.start_pos + offset))\n self._reader.seek(self.lineid_to_offset[self.start_pos + offset])\n self.data_cnt = offset\n except Exception:\n print(\"slice_id {} seek offset {}\".format(self.slice_id, offset))\n self._reader.seek(self.lineid_to_offset[offset])\n self.data_cnt = offset\n\n def __del__(self):\n 
self._reader.close()\n\n def __len__(self):\n return self.row_count\n\n def get_total_row_count(self):\n return self.total_row_count\n\n def __getitem__(self, index):\n if self.data_cnt == self.row_count:\n print(\"reach the end of datafile, start a new reader\")\n self.data_cnt = 0\n self._reader = self._get_reader()\n column_l = self._reader.readline().rstrip(\"\\n\").split(self.separator)\n self.data_cnt += 1\n try:\n column_l = [dtype(column_l[col_id]) for col_id, dtype in zip(self.selected_col_ids, self.dtypes)]\n except IndexError:\n print('Stop')\n return column_l" } ]
from dataclasses import dataclass, field from typing import Optional from fairseq.tasks import register_task from fairseq.data import FairseqDataset, iterators from tasks.ofa_task import OFATask, OFAConfig from data.pretrain_data.unify_dataset import UnifyDataset from data.file_dataset import FileDataset import json import logging import os import math
11,143
# Copyright 2022 The OFA-Sys Team. # All rights reserved. # This source code is licensed under the Apache 2.0 license # found in the LICENSE file in the root directory. logger = logging.getLogger(__name__) @dataclass class UnifyConfig(OFAConfig): max_image_size: int = field( default=512, metadata={"help": ""} ) text_data: Optional[str] = field( default=None, metadata={"help": "pure text data"}, ) image_data: Optional[str] = field( default=None, metadata={"help": "pure image data"}, ) detection_data: Optional[str] = field( default=None, metadata={"help": "detection data"}, ) text_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure text data selected cols"}, ) image_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure image data selected cols"}, ) detection_selected_cols: Optional[str] = field( default=None, metadata={"help": "detection data selected cols"}, ) neg_sample_dir: Optional[str] = field( default=None, metadata={"help": "negative sample directory, which contains captions (taken from all image-text pairs), " "answers (taken from VQA), " "objects (taken form OpenImages) "}, ) code_image_size: int = field( default=128, metadata={"help": "the resolution of the generated image in the image infilling task"} ) pretrain_seed: int = field( default=7, metadata={"help": "pretrain seed"}, ) mask_ratio: float = field( default=0.3, metadata={"help": "fraction of words/subwords that will be masked"}, ) random_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], use random token this often"}, ) keep_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], keep original token this often"}, ) mask_length: str = field( default="span-poisson", metadata={"help": "mask length to choose ['subword', 'word', 'span-poisson']"}, ) poisson_lambda: float = field( default=3.0, metadata={"help": "randomly shuffle sentences for this proportion of inputs"}, ) replace_length: int = field( default=1, metadata={"help": "when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)"}, ) @register_task("unify_task", dataclass=UnifyConfig)
# Copyright 2022 The OFA-Sys Team. # All rights reserved. # This source code is licensed under the Apache 2.0 license # found in the LICENSE file in the root directory. logger = logging.getLogger(__name__) @dataclass class UnifyConfig(OFAConfig): max_image_size: int = field( default=512, metadata={"help": ""} ) text_data: Optional[str] = field( default=None, metadata={"help": "pure text data"}, ) image_data: Optional[str] = field( default=None, metadata={"help": "pure image data"}, ) detection_data: Optional[str] = field( default=None, metadata={"help": "detection data"}, ) text_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure text data selected cols"}, ) image_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure image data selected cols"}, ) detection_selected_cols: Optional[str] = field( default=None, metadata={"help": "detection data selected cols"}, ) neg_sample_dir: Optional[str] = field( default=None, metadata={"help": "negative sample directory, which contains captions (taken from all image-text pairs), " "answers (taken from VQA), " "objects (taken form OpenImages) "}, ) code_image_size: int = field( default=128, metadata={"help": "the resolution of the generated image in the image infilling task"} ) pretrain_seed: int = field( default=7, metadata={"help": "pretrain seed"}, ) mask_ratio: float = field( default=0.3, metadata={"help": "fraction of words/subwords that will be masked"}, ) random_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], use random token this often"}, ) keep_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], keep original token this often"}, ) mask_length: str = field( default="span-poisson", metadata={"help": "mask length to choose ['subword', 'word', 'span-poisson']"}, ) poisson_lambda: float = field( default=3.0, metadata={"help": "randomly shuffle sentences for this proportion of inputs"}, ) replace_length: int = field( default=1, metadata={"help": "when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)"}, ) @register_task("unify_task", dataclass=UnifyConfig)
class UnifyTask(OFATask):
0
2023-10-20 20:01:42+00:00
16k
timapage/pyqt6-yolov8
main.py
[ { "identifier": "CameraCaptureThread", "path": "src/qt/stream/video_capture.py", "snippet": "class CameraCaptureThread(QThread):\n send_video_info = pyqtSignal(dict)\n send_frame = pyqtSignal(list)\n def __init__(self):\n super(CameraCaptureThread, self).__init__()\n self.thread_name = \"CameraCaptureThread\"\n self.threadFlag = False\n \n def set_start_config(self, video_source):\n self.threadFlag = True\n self.get_video_source(video_source)\n \n def get_video_source(self, video_source):\n self.video_source = video_source\n \n def get_video_info(self, video_cap):\n video_info = {}\n video_info[\"FPS\"] = video_cap.get(cv.CAP_PROP_FPS)\n video_info[\"length\"] = int(video_cap.get(cv.CAP_PROP_FRAME_COUNT))\n video_info[\"size\"] = (int(video_cap.get(cv.CAP_PROP_FRAME_WIDTH)),int(video_cap.get(cv.CAP_PROP_FRAME_HEIGHT)))\n return video_info\n \n def stop_capture(self):\n self.threadFlag = False\n\n def run(self): \n cap = cv.VideoCapture(self.video_source)\n if not cap.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_info = self.get_video_info(cap)\n self.send_video_info.emit(video_info)\n\n idx_frame = 0\n while self.threadFlag:\n ret, frame = cap.read()\n if ret is False or self.threadFlag is False:\n break\n self.send_frame.emit(list([idx_frame,frame]))\n idx_frame += 1\n self.send_frame.emit(list([None,None]))\n cap.release()" }, { "identifier": "VideoVisualizationThread", "path": "src/qt/stream/visualize.py", "snippet": "class VideoVisualizationThread(QThread):\n send_thread_start_stop_flag = pyqtSignal(str)\n send_displayable_frame = pyqtSignal(QImage)\n send_ai_output = pyqtSignal(list)\n def __init__(self):\n super(VideoVisualizationThread, self).__init__()\n self.thread_name = \"VideoVisualizationThread\"\n self.threadFlag = False\n \n def set_start_config(self, screen_size):\n self.threadFlag = True\n self.frame_buffer = FrameBuffer(10)\n self.ai_output = []\n self.get_screen_size(screen_size)\n \n def get_fresh_frame(self, frame_list):\n self.frame_buffer.put(frame=copy.deepcopy(frame_list[1]), frame_id=frame_list[0], realtime=True)\n\n def get_ai_output(self, ai_output):\n self.ai_output = copy.deepcopy(ai_output)\n \n def get_screen_size(self, screen_size):\n self.iw, self.ih = screen_size\n \n def stop_display(self):\n self.threadFlag = False\n\n def run(self):\n self.send_thread_start_stop_flag.emit(\"processing_on_camera\")\n while self.threadFlag:\n frame_id, frame = self.frame_buffer.get()\n if frame_id is not None:\n frame = draw_results(frame, self.ai_output)\n show_image = self.convert_cv_qt(frame, self.ih, self.iw)\n self.send_displayable_frame.emit(show_image)\n self.send_ai_output.emit(self.ai_output)\n else:\n break\n blank_image = np.zeros((self.ih, self.iw, 3))\n blank_image = cv.cvtColor(blank_image.astype('uint8'), cv.COLOR_BGR2RGBA)\n show_image = QImage(blank_image.data, blank_image.shape[1], blank_image.shape[0], QImage.Format.Format_RGBA8888)\n self.send_displayable_frame.emit(show_image)\n self.send_ai_output.emit([])\n self.send_thread_start_stop_flag.emit(\"waiting_for_setting\")\n\n\n def convert_cv_qt(self, image, screen_height, screen_width):\n h, w, _ = image.shape\n scale = min(screen_width / w, screen_height / h)\n nw, nh = int(scale * w), int(scale * h)\n image_resized = cv.resize(image, (nw, nh))\n image_paded = np.full(shape=[screen_height, screen_width, 3], fill_value=0)\n dw, dh = (screen_width - nw) // 2, (screen_height - nh) // 2\n image_paded[dh:nh + dh, dw:nw + dw, :] = image_resized\n image_paded = 
cv.cvtColor(image_paded.astype('uint8'), cv.COLOR_BGR2RGBA)\n return QImage(image_paded.data, image_paded.shape[1], image_paded.shape[0], QImage.Format.Format_RGBA8888)" }, { "identifier": "AiWorkerThread", "path": "src/qt/stream/ai_worker.py", "snippet": "class AiWorkerThread(QThread):\n send_ai_output = pyqtSignal(list)\n def __init__(self):\n super(AiWorkerThread, self).__init__()\n self.thread_name = \"AiWorkerThread\"\n self.threadFlag = False\n \n def set_start_config(self, ai_task, model_name=\"yolov8n\", confidence_threshold=0.35, iou_threshold=0.45):\n self.threadFlag = True\n self.ai_task = ai_task\n self.latest_frame = LatestFrame()\n self.confi_thr = confidence_threshold\n self.iou_thr = iou_threshold\n self.model_name = model_name\n self._init_yolo()\n self._init_tracker()\n\n def set_iou_threshold(self, iou_threshold):\n self.iou_thr = iou_threshold\n \n def set_confidence_threshold(self, confidence_threshold):\n self.confi_thr = confidence_threshold\n \n def set_model_name(self, model_name):\n self.model_name = model_name\n\n def _init_yolo(self):\n if self.ai_task == \"object_detection\":\n self.detector = YoloDetector()\n self.detector.init(\n model_path=os.path.join(ROOT, f\"weights/detection/{self.model_name}.onnx\"),\n class_txt_path=os.path.join(ROOT, \"weights/classes.txt\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n elif self.ai_task == \"pose_detection\":\n self.pose_detector = PoseDetector()\n self.pose_detector.init(\n model_path=os.path.join(ROOT, f\"weights/pose/{self.model_name}-pose.onnx\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n elif self.ai_task == \"segmentation\":\n self.seg_detector = YOLOSeg()\n self.seg_detector.init(\n model_path=os.path.join(ROOT, f\"weights/segmentation/{self.model_name}-seg.onnx\"),\n class_txt_path=os.path.join(ROOT, \"weights/classes.txt\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n\n def _init_tracker(self):\n self.tracker = DeepSort(\n model_path=os.path.join(ROOT, f\"src/models/tracking/deep_sort/deep/checkpoint/ckpt.t7\"))\n \n def get_frame(self, frame_list):\n self.latest_frame.put(frame=frame_list[1], frame_id=frame_list[0], realtime=True)\n \n def stop_process(self):\n self.threadFlag = False\n \n def run(self):\n while self.threadFlag:\n frame_id, frame = self.latest_frame.get()\n if frame_id is None:\n break\n model_output = []\n if self.ai_task == \"object_detection\":\n model_output = self.detector.inference(frame, self.confi_thr, self.iou_thr)\n elif self.ai_task == \"pose_detection\":\n model_output = self.pose_detector.inference(frame, self.confi_thr, self.iou_thr)\n elif self.ai_task == \"segmentation\":\n model_output = self.seg_detector.inference(frame, self.confi_thr, self.iou_thr)\n\n model_output = self.tracker.update(\n detection_results=model_output,\n ori_img=frame)\n \n self.model_output = add_image_id(model_output, frame_id)\n self.send_ai_output.emit(model_output)" }, { "identifier": "Ui_MainWindow", "path": "src/ui/main_window.py", "snippet": "class Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(878, 617)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())\n MainWindow.setSizePolicy(sizePolicy)\n icon = QtGui.QIcon()\n 
icon.addPixmap(QtGui.QPixmap(\":/images/icons/icon.png\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n MainWindow.setWindowIcon(icon)\n MainWindow.setStyleSheet(\"background-color: rgb(119, 118, 123);\\n\"\n\"border-color: rgb(119, 118, 123);\")\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setStyleSheet(\"background-color: rgb(119, 118, 123);\\n\"\n\"border-color: rgb(119, 118, 123);\")\n self.centralwidget.setObjectName(\"centralwidget\")\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.centralwidget)\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n self.verticalLayout = QtWidgets.QVBoxLayout()\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout()\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.groupBox = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setFamily(\"Ubuntu\")\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox.setFont(font)\n self.groupBox.setStyleSheet(\"\")\n self.groupBox.setObjectName(\"groupBox\")\n self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.groupBox)\n self.verticalLayout_4.setObjectName(\"verticalLayout_4\")\n self.radioButton_det = QtWidgets.QRadioButton(self.groupBox)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.radioButton_det.setFont(font)\n self.radioButton_det.setStyleSheet(\"QRadioButton\\n\"\n\"{font-size: 16px;\\n\"\n\" font-weight: bold;\\n\"\n\" border-radius:9px;\\n\"\n\" background:rgba(66, 195, 255, 0);\\n\"\n\"color: rgb(218, 218, 218);;}\\n\"\n\"QRadioButton::indicator {\\n\"\n\" width: 20px;\\n\"\n\" height: 20px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:unchecked {\\n\"\n\" image: url(:/images/icons/button-off.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:checked {\\n\"\n\" \\n\"\n\" image: url(:/images/icons/button-on.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::disabled{\\n\"\n\" color: rgb(0, 0, 0);\\n\"\n\"}\\n\"\n\"\")\n self.radioButton_det.setLocale(QtCore.QLocale(QtCore.QLocale.Language.English, QtCore.QLocale.Country.Zimbabwe))\n self.radioButton_det.setChecked(True)\n self.radioButton_det.setObjectName(\"radioButton_det\")\n self.verticalLayout_4.addWidget(self.radioButton_det)\n self.radioButton_seg = QtWidgets.QRadioButton(self.groupBox)\n self.radioButton_seg.setStyleSheet(\"QRadioButton\\n\"\n\"{font-size: 16px;\\n\"\n\" font-weight: bold;\\n\"\n\" border-radius:9px;\\n\"\n\" background:rgba(66, 195, 255, 0);\\n\"\n\"color: rgb(218, 218, 218);;}\\n\"\n\"\\n\"\n\"QRadioButton::indicator {\\n\"\n\" width: 20px;\\n\"\n\" height: 20px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:unchecked {\\n\"\n\" image: url(:/images/icons/button-off.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:checked {\\n\"\n\" \\n\"\n\" image: url(:/images/icons/button-on.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::disabled{\\n\"\n\" color: rgb(0, 0, 0);\\n\"\n\"}\")\n self.radioButton_seg.setObjectName(\"radioButton_seg\")\n self.verticalLayout_4.addWidget(self.radioButton_seg)\n self.radioButton_pose = QtWidgets.QRadioButton(self.groupBox)\n self.radioButton_pose.setStyleSheet(\"QRadioButton\\n\"\n\"{font-size: 16px;\\n\"\n\" font-weight: bold;\\n\"\n\" border-radius:9px;\\n\"\n\" background:rgba(66, 195, 255, 0);\\n\"\n\"color: rgb(218, 218, 
218);;}\\n\"\n\"\\n\"\n\"QRadioButton::indicator {\\n\"\n\" width: 20px;\\n\"\n\" height: 20px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:unchecked {\\n\"\n\" image: url(:/images/icons/button-off.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:checked {\\n\"\n\" \\n\"\n\" image: url(:/images/icons/button-on.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::disabled{\\n\"\n\"color: rgb(0, 0, 0);\\n\"\n\"}\")\n self.radioButton_pose.setObjectName(\"radioButton_pose\")\n self.verticalLayout_4.addWidget(self.radioButton_pose)\n self.verticalLayout_2.addWidget(self.groupBox)\n self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox_2.setFont(font)\n self.groupBox_2.setObjectName(\"groupBox_2\")\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.groupBox_2)\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.comboBox_model = QtWidgets.QComboBox(self.groupBox_2)\n self.comboBox_model.setAutoFillBackground(False)\n self.comboBox_model.setStyleSheet(\"QComboBox QAbstractItemView {\\n\"\n\"font-size: 16px;\\n\"\n\"outline:none;\\n\"\n\"border:none;}\\n\"\n\"\\n\"\n\"QComboBox{\\n\"\n\"font-size: 16px;\\n\"\n\"\\n\"\n\"color: rgb(218, 218, 218);\\n\"\n\"border-width:0px;\\n\"\n\"border-color:white;\\n\"\n\"border-style:solid;\\n\"\n\"background-color: rgba(200, 200, 200,50);}\\n\"\n\"\\n\"\n\"QComboBox::drop-down {\\n\"\n\"margin-top:1;\\n\"\n\"height:20;\\n\"\n\"color: rgb(218, 218, 218);\\n\"\n\"background-color: rgba(200, 200, 200,50);\\n\"\n\"border-image: url(:/images/icons/roll_down.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QComboBox::disabled{\\n\"\n\"color: rgb(0, 0, 0);\\n\"\n\"}\\n\"\n\"\")\n self.comboBox_model.setCurrentText(\"YOLOv8n\")\n self.comboBox_model.setObjectName(\"comboBox_model\")\n self.comboBox_model.addItem(\"\")\n self.comboBox_model.addItem(\"\")\n self.comboBox_model.addItem(\"\")\n self.comboBox_model.addItem(\"\")\n self.comboBox_model.addItem(\"\")\n self.horizontalLayout_2.addWidget(self.comboBox_model)\n self.verticalLayout_2.addWidget(self.groupBox_2)\n self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox_3.setFont(font)\n self.groupBox_3.setObjectName(\"groupBox_3\")\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.groupBox_3)\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.pushButton_file = QtWidgets.QPushButton(self.groupBox_3)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_file.sizePolicy().hasHeightForWidth())\n self.pushButton_file.setSizePolicy(sizePolicy)\n self.pushButton_file.setStyleSheet(\"QPushButton{\\n\"\n\" image: url(:/images/icons/video.png);\\n\"\n\"font-size: 14px;\\n\"\n\"font-weight: bold;\\n\"\n\"color:white;\\n\"\n\"text-align: center center;\\n\"\n\"padding-left: 5px;\\n\"\n\"padding-right: 5px;\\n\"\n\"padding-top: 4px;\\n\"\n\"padding-bottom: 4px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-color: rgba(255, 255, 255, 255);\\n\"\n\"border-radius: 3px;\\n\"\n\"background-color: rgba(200, 200, 200,0);}\\n\"\n\"\\n\"\n\"QPushButton:focus{outline: none;}\\n\"\n\"\\n\"\n\"QPushButton::pressed{\\n\"\n\" font-size: 14px;\\n\"\n\" font-weight: bold;\\n\"\n\" 
color:rgb(200,200,200);\\n\"\n\" text-align: center center;\\n\"\n\" padding-left: 5px;\\n\"\n\" padding-right: 5px;\\n\"\n\" padding-top: 4px;\\n\"\n\" padding-bottom: 4px;\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 0px;\\n\"\n\" border-color: rgba(255, 255, 255, 255);\\n\"\n\" border-radius: 3px;\\n\"\n\" background-color: #bf513b;}\\n\"\n\"\\n\"\n\"QPushButton::disabled{\\n\"\n\" image: url(:/images/icons/video_off.png);\\n\"\n\" font-size: 14px;\\n\"\n\" font-weight: bold;\\n\"\n\" color:rgb(200,200,200);\\n\"\n\" text-align: center center;\\n\"\n\" padding-left: 5px;\\n\"\n\" padding-right: 5px;\\n\"\n\" padding-top: 4px;\\n\"\n\" padding-bottom: 4px;\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 0px;\\n\"\n\" border-color: rgba(255, 255, 255, 255);\\n\"\n\" border-radius: 3px;}\\n\"\n\"QPushButton::hover {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(48,148,243,80);}\")\n self.pushButton_file.setText(\"\")\n self.pushButton_file.setObjectName(\"pushButton_file\")\n self.horizontalLayout_3.addWidget(self.pushButton_file)\n self.pushButton_cam = QtWidgets.QPushButton(self.groupBox_3)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_cam.sizePolicy().hasHeightForWidth())\n self.pushButton_cam.setSizePolicy(sizePolicy)\n self.pushButton_cam.setStyleSheet(\"QPushButton{\\n\"\n\" image: url(:/images/icons/camera_on.png);\\n\"\n\"font-size: 14px;\\n\"\n\"font-weight: bold;\\n\"\n\"color:white;\\n\"\n\"text-align: center center;\\n\"\n\"padding-left: 5px;\\n\"\n\"padding-right: 5px;\\n\"\n\"padding-top: 4px;\\n\"\n\"padding-bottom: 4px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-color: rgba(255, 255, 255, 255);\\n\"\n\"border-radius: 3px;\\n\"\n\"background-color: rgba(200, 200, 200,0);}\\n\"\n\"\\n\"\n\"QPushButton:focus{outline: none;}\\n\"\n\"\\n\"\n\"QPushButton::pressed{\\n\"\n\" font-size: 14px;\\n\"\n\" font-weight: bold;\\n\"\n\" color:rgb(200,200,200);\\n\"\n\" text-align: center center;\\n\"\n\" padding-left: 5px;\\n\"\n\" padding-right: 5px;\\n\"\n\" padding-top: 4px;\\n\"\n\" padding-bottom: 4px;\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 0px;\\n\"\n\" border-color: rgba(255, 255, 255, 255);\\n\"\n\" border-radius: 3px;\\n\"\n\" background-color: #bf513b;}\\n\"\n\"\\n\"\n\"QPushButton::disabled{\\n\"\n\" image: url(:/images/icons/camera_off.png);\\n\"\n\" font-size: 14px;\\n\"\n\" font-weight: bold;\\n\"\n\" color:rgb(200,200,200);\\n\"\n\" text-align: center center;\\n\"\n\" padding-left: 5px;\\n\"\n\" padding-right: 5px;\\n\"\n\" padding-top: 4px;\\n\"\n\" padding-bottom: 4px;\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 0px;\\n\"\n\" border-color: rgba(255, 255, 255, 255);\\n\"\n\" border-radius: 3px;}\\n\"\n\"QPushButton::hover {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(48,148,243,80);}url(:/images/icons/camera_on.png)\")\n self.pushButton_cam.setText(\"\")\n self.pushButton_cam.setObjectName(\"pushButton_cam\")\n self.horizontalLayout_3.addWidget(self.pushButton_cam)\n self.verticalLayout_2.addWidget(self.groupBox_3)\n self.groupBox_4 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n 
self.groupBox_4.setFont(font)\n self.groupBox_4.setObjectName(\"groupBox_4\")\n self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.groupBox_4)\n self.horizontalLayout_5.setObjectName(\"horizontalLayout_5\")\n self.doubleSpinBox_conf = QtWidgets.QDoubleSpinBox(self.groupBox_4)\n self.doubleSpinBox_conf.setStyleSheet(\"QDoubleSpinBox{\\n\"\n\"background:rgba(200, 200, 200,50);\\n\"\n\"color:white;\\n\"\n\"font-size: 14px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 1px;\\n\"\n\"border-color: rgba(200, 200, 200,100);\\n\"\n\"border-radius: 3px;}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::down-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"QDoubleSpinBox::down-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::up-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\\n\"\n\"QDoubleSpinBox::up-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\")\n self.doubleSpinBox_conf.setMaximum(1.0)\n self.doubleSpinBox_conf.setSingleStep(0.01)\n self.doubleSpinBox_conf.setStepType(QtWidgets.QAbstractSpinBox.StepType.AdaptiveDecimalStepType)\n self.doubleSpinBox_conf.setProperty(\"value\", 0.3)\n self.doubleSpinBox_conf.setObjectName(\"doubleSpinBox_conf\")\n self.horizontalLayout_5.addWidget(self.doubleSpinBox_conf)\n self.horizontalSlider_conf = QtWidgets.QSlider(self.groupBox_4)\n self.horizontalSlider_conf.setStyleSheet(\"QSlider{\\n\"\n\"border-color: #bcbcbc;\\n\"\n\"color:#d9d9d9;\\n\"\n\"}\\n\"\n\"QSlider::groove:horizontal { \\n\"\n\" border: 1px solid #999999; \\n\"\n\" height: 3px; \\n\"\n\" margin: 0px 0; \\n\"\n\" left: 5px; right: 5px; \\n\"\n\" }\\n\"\n\"QSlider::handle:horizontal { \\n\"\n\" border: 0px ; \\n\"\n\" border-image: url(:/images/icons/point.png);\\n\"\n\" width:15px;\\n\"\n\" margin: -7px -7px -7px -7px; \\n\"\n\"} \\n\"\n\"QSlider::add-page:horizontal{\\n\"\n\"background: #d9d9d9; \\n\"\n\"\\n\"\n\"}\\n\"\n\"QSlider::sub-page:horizontal{ \\n\"\n\" background: #373737; \\n\"\n\"}\")\n self.horizontalSlider_conf.setMaximum(99)\n self.horizontalSlider_conf.setSingleStep(1)\n self.horizontalSlider_conf.setPageStep(99)\n self.horizontalSlider_conf.setProperty(\"value\", 30)\n self.horizontalSlider_conf.setOrientation(QtCore.Qt.Orientation.Horizontal)\n self.horizontalSlider_conf.setObjectName(\"horizontalSlider_conf\")\n self.horizontalLayout_5.addWidget(self.horizontalSlider_conf)\n self.verticalLayout_2.addWidget(self.groupBox_4)\n self.groupBox_5 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox_5.setFont(font)\n self.groupBox_5.setObjectName(\"groupBox_5\")\n self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.groupBox_5)\n self.horizontalLayout_6.setObjectName(\"horizontalLayout_6\")\n self.doubleSpinBox_iou = QtWidgets.QDoubleSpinBox(self.groupBox_5)\n self.doubleSpinBox_iou.setStyleSheet(\"QDoubleSpinBox{\\n\"\n\"background:rgba(200, 200, 200,50);\\n\"\n\"color:white;\\n\"\n\"font-size: 14px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 1px;\\n\"\n\"border-color: rgba(200, 200, 200,100);\\n\"\n\"border-radius: 3px;}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::down-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: 
url(:/images/icons/botton_down.png);}\\n\"\n\"QDoubleSpinBox::down-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::up-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\\n\"\n\"QDoubleSpinBox::up-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\")\n self.doubleSpinBox_iou.setMaximum(1.0)\n self.doubleSpinBox_iou.setSingleStep(0.01)\n self.doubleSpinBox_iou.setStepType(QtWidgets.QAbstractSpinBox.StepType.AdaptiveDecimalStepType)\n self.doubleSpinBox_iou.setProperty(\"value\", 0.45)\n self.doubleSpinBox_iou.setObjectName(\"doubleSpinBox_iou\")\n self.horizontalLayout_6.addWidget(self.doubleSpinBox_iou)\n self.horizontalSlider_iou = QtWidgets.QSlider(self.groupBox_5)\n self.horizontalSlider_iou.setStyleSheet(\"QSlider{\\n\"\n\"border-color: #bcbcbc;\\n\"\n\"color:#d9d9d9;\\n\"\n\"}\\n\"\n\"QSlider::groove:horizontal { \\n\"\n\" border: 1px solid #999999; \\n\"\n\" height: 3px; \\n\"\n\" margin: 0px 0; \\n\"\n\" left: 5px; right: 5px; \\n\"\n\" }\\n\"\n\"QSlider::handle:horizontal { \\n\"\n\" border: 0px ; \\n\"\n\" border-image: url(:/images/icons/point.png);\\n\"\n\" width:15px;\\n\"\n\" margin: -7px -7px -7px -7px; \\n\"\n\"} \\n\"\n\"QSlider::add-page:horizontal{\\n\"\n\"background: #d9d9d9; \\n\"\n\"\\n\"\n\"}\\n\"\n\"QSlider::sub-page:horizontal{ \\n\"\n\" background: #373737; \\n\"\n\"}\")\n self.horizontalSlider_iou.setProperty(\"value\", 45)\n self.horizontalSlider_iou.setOrientation(QtCore.Qt.Orientation.Horizontal)\n self.horizontalSlider_iou.setObjectName(\"horizontalSlider_iou\")\n self.horizontalLayout_6.addWidget(self.horizontalSlider_iou)\n self.verticalLayout_2.addWidget(self.groupBox_5)\n self.groupBox_6 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox_6.setFont(font)\n self.groupBox_6.setObjectName(\"groupBox_6\")\n self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.groupBox_6)\n self.horizontalLayout_7.setObjectName(\"horizontalLayout_7\")\n self.doubleSpinBox_interval = QtWidgets.QDoubleSpinBox(self.groupBox_6)\n self.doubleSpinBox_interval.setStyleSheet(\"QDoubleSpinBox{\\n\"\n\"background:rgba(200, 200, 200,50);\\n\"\n\"color:white;\\n\"\n\"font-size: 14px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 1px;\\n\"\n\"border-color: rgba(200, 200, 200,100);\\n\"\n\"border-radius: 3px;}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::down-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"QDoubleSpinBox::down-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::up-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\\n\"\n\"QDoubleSpinBox::up-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\")\n self.doubleSpinBox_interval.setDecimals(0)\n self.doubleSpinBox_interval.setMaximum(10.0)\n self.doubleSpinBox_interval.setObjectName(\"doubleSpinBox_interval\")\n self.horizontalLayout_7.addWidget(self.doubleSpinBox_interval)\n self.horizontalSlider_interval = QtWidgets.QSlider(self.groupBox_6)\n self.horizontalSlider_interval.setStyleSheet(\"QSlider{\\n\"\n\"border-color: 
#bcbcbc;\\n\"\n\"color:#d9d9d9;\\n\"\n\"}\\n\"\n\"QSlider::groove:horizontal { \\n\"\n\" border: 1px solid #999999; \\n\"\n\" height: 3px; \\n\"\n\" margin: 0px 0; \\n\"\n\" left: 5px; right: 5px; \\n\"\n\" }\\n\"\n\"QSlider::handle:horizontal { \\n\"\n\" border: 0px ; \\n\"\n\" border-image: url(:/images/icons/point.png);\\n\"\n\" width:15px;\\n\"\n\" margin: -7px -7px -7px -7px; \\n\"\n\"} \\n\"\n\"QSlider::add-page:horizontal{\\n\"\n\"background: #d9d9d9; \\n\"\n\"\\n\"\n\"}\\n\"\n\"QSlider::sub-page:horizontal{ \\n\"\n\" background: #373737; \\n\"\n\"}\")\n self.horizontalSlider_interval.setMaximum(10)\n self.horizontalSlider_interval.setPageStep(1)\n self.horizontalSlider_interval.setOrientation(QtCore.Qt.Orientation.Horizontal)\n self.horizontalSlider_interval.setObjectName(\"horizontalSlider_interval\")\n self.horizontalLayout_7.addWidget(self.horizontalSlider_interval)\n self.verticalLayout_2.addWidget(self.groupBox_6)\n self.verticalLayout_2.setStretch(0, 3)\n self.verticalLayout_2.setStretch(1, 1)\n self.verticalLayout_2.setStretch(2, 2)\n self.verticalLayout_2.setStretch(3, 1)\n self.verticalLayout_2.setStretch(4, 1)\n self.verticalLayout_2.setStretch(5, 1)\n self.horizontalLayout.addLayout(self.verticalLayout_2)\n self.verticalLayout_3 = QtWidgets.QVBoxLayout()\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\n self.label_display = QtWidgets.QLabel(self.centralwidget)\n self.label_display.setStyleSheet(\"background-color: rgb(0, 0, 0);\")\n self.label_display.setText(\"\")\n self.label_display.setObjectName(\"label_display\")\n self.verticalLayout_3.addWidget(self.label_display)\n self.horizontalLayout_8 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_8.setObjectName(\"horizontalLayout_8\")\n self.pushButton_play = QtWidgets.QPushButton(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_play.sizePolicy().hasHeightForWidth())\n self.pushButton_play.setSizePolicy(sizePolicy)\n self.pushButton_play.setMinimumSize(QtCore.QSize(40, 40))\n self.pushButton_play.setStyleSheet(\"QPushButton {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(223, 223, 223, 0);\\n\"\n\"}\\n\"\n\"QPushButton::focus{outline: none;}\\n\"\n\"QPushButton::hover {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(223, 223, 223, 150);\\n\"\n\"}\")\n self.pushButton_play.setText(\"\")\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/pause.png\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/run.png\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.On)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/pause.png\"), QtGui.QIcon.Mode.Disabled, QtGui.QIcon.State.Off)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/run.png\"), QtGui.QIcon.Mode.Disabled, QtGui.QIcon.State.On)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/pause.png\"), QtGui.QIcon.Mode.Active, QtGui.QIcon.State.Off)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/run.png\"), QtGui.QIcon.Mode.Active, QtGui.QIcon.State.On)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/pause.png\"), QtGui.QIcon.Mode.Selected, QtGui.QIcon.State.Off)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/run.png\"), QtGui.QIcon.Mode.Selected, QtGui.QIcon.State.On)\n 
self.pushButton_play.setIcon(icon1)\n self.pushButton_play.setIconSize(QtCore.QSize(30, 30))\n self.pushButton_play.setCheckable(True)\n self.pushButton_play.setObjectName(\"pushButton_play\")\n self.horizontalLayout_8.addWidget(self.pushButton_play)\n self.progressBar_play = QtWidgets.QProgressBar(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.progressBar_play.sizePolicy().hasHeightForWidth())\n self.progressBar_play.setSizePolicy(sizePolicy)\n self.progressBar_play.setMinimumSize(QtCore.QSize(0, 0))\n self.progressBar_play.setStyleSheet(\"QProgressBar{ \\n\"\n\"color: rgb(255, 255, 255); \\n\"\n\"font:12pt;\\n\"\n\" border-radius:2px; \\n\"\n\"text-align:center; \\n\"\n\"border:none; \\n\"\n\"background-color: rgba(215, 215, 215,100);} \\n\"\n\"\\n\"\n\"QProgressBar:chunk{ \\n\"\n\"border-radius:0px; \\n\"\n\"background: rgba(55, 55, 55, 200);}\")\n self.progressBar_play.setMaximum(1000)\n self.progressBar_play.setProperty(\"value\", 0)\n self.progressBar_play.setFormat(\"\")\n self.progressBar_play.setObjectName(\"progressBar_play\")\n self.horizontalLayout_8.addWidget(self.progressBar_play)\n self.pushButton_stop = QtWidgets.QPushButton(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_stop.sizePolicy().hasHeightForWidth())\n self.pushButton_stop.setSizePolicy(sizePolicy)\n self.pushButton_stop.setMinimumSize(QtCore.QSize(40, 40))\n self.pushButton_stop.setStyleSheet(\"QPushButton {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(223, 223, 223, 0);\\n\"\n\"}\\n\"\n\"QPushButton::focus{outline: none;}\\n\"\n\"QPushButton::hover {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(223, 223, 223, 150);}\")\n self.pushButton_stop.setText(\"\")\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\":/images/icons/stop.png\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n self.pushButton_stop.setIcon(icon2)\n self.pushButton_stop.setIconSize(QtCore.QSize(30, 30))\n self.pushButton_stop.setObjectName(\"pushButton_stop\")\n self.horizontalLayout_8.addWidget(self.pushButton_stop)\n self.horizontalLayout_8.setStretch(0, 1)\n self.horizontalLayout_8.setStretch(1, 12)\n self.horizontalLayout_8.setStretch(2, 1)\n self.verticalLayout_3.addLayout(self.horizontalLayout_8)\n self.tableWidget_results = QtWidgets.QTableWidget(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.tableWidget_results.sizePolicy().hasHeightForWidth())\n self.tableWidget_results.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setFamily(\"Ubuntu\")\n font.setPointSize(11)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.tableWidget_results.setFont(font)\n self.tableWidget_results.setAutoFillBackground(True)\n self.tableWidget_results.setStyleSheet(\"\")\n self.tableWidget_results.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAsNeeded)\n 
self.tableWidget_results.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.SizeAdjustPolicy.AdjustToContents)\n self.tableWidget_results.setObjectName(\"tableWidget_results\")\n self.tableWidget_results.setColumnCount(4)\n self.tableWidget_results.setRowCount(0)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget_results.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget_results.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget_results.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget_results.setHorizontalHeaderItem(3, item)\n self.tableWidget_results.horizontalHeader().setCascadingSectionResizes(True)\n self.tableWidget_results.horizontalHeader().setSortIndicatorShown(False)\n self.tableWidget_results.horizontalHeader().setStretchLastSection(True)\n self.verticalLayout_3.addWidget(self.tableWidget_results)\n self.verticalLayout_3.setStretch(0, 15)\n self.verticalLayout_3.setStretch(1, 1)\n self.verticalLayout_3.setStretch(2, 4)\n self.horizontalLayout.addLayout(self.verticalLayout_3)\n self.horizontalLayout.setStretch(0, 2)\n self.horizontalLayout.setStretch(1, 12)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.label_status = QtWidgets.QLabel(self.centralwidget)\n self.label_status.setStyleSheet(\"QLabel\\n\"\n\"{\\n\"\n\" font-size: 16px;\\n\"\n\" font-weight: light;\\n\"\n\" border-radius:9px;\\n\"\n\" background:rgba(66, 195, 255, 0);\\n\"\n\"color: rgb(218, 218, 218);\\n\"\n\"}\\n\"\n\"\")\n self.label_status.setText(\"\")\n self.label_status.setObjectName(\"label_status\")\n self.verticalLayout.addWidget(self.label_status)\n self.verticalLayout.setStretch(0, 9)\n self.horizontalLayout_4.addLayout(self.verticalLayout)\n MainWindow.setCentralWidget(self.centralwidget)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"YOLOv8 GUI\"))\n self.groupBox.setTitle(_translate(\"MainWindow\", \"Tasks\"))\n self.radioButton_det.setText(_translate(\"MainWindow\", \"Detection\"))\n self.radioButton_seg.setText(_translate(\"MainWindow\", \"Segmentation\"))\n self.radioButton_pose.setText(_translate(\"MainWindow\", \"Pose Estimation\"))\n self.groupBox_2.setTitle(_translate(\"MainWindow\", \"Models\"))\n self.comboBox_model.setItemText(0, _translate(\"MainWindow\", \"YOLOv8n\"))\n self.comboBox_model.setItemText(1, _translate(\"MainWindow\", \"YOLOv8s\"))\n self.comboBox_model.setItemText(2, _translate(\"MainWindow\", \"YOLOv8m\"))\n self.comboBox_model.setItemText(3, _translate(\"MainWindow\", \"YOLOv8l\"))\n self.comboBox_model.setItemText(4, _translate(\"MainWindow\", \"YOLOv8x\"))\n self.groupBox_3.setTitle(_translate(\"MainWindow\", \"Inputs\"))\n self.groupBox_4.setTitle(_translate(\"MainWindow\", \"Confidence\"))\n self.groupBox_5.setTitle(_translate(\"MainWindow\", \"IoU\"))\n self.groupBox_6.setTitle(_translate(\"MainWindow\", \"Frame 
Interval\"))\n item = self.tableWidget_results.horizontalHeaderItem(0)\n item.setText(_translate(\"MainWindow\", \"ID\"))\n item = self.tableWidget_results.horizontalHeaderItem(1)\n item.setText(_translate(\"MainWindow\", \"Class\"))\n item = self.tableWidget_results.horizontalHeaderItem(2)\n item.setText(_translate(\"MainWindow\", \"Confidence\"))\n item = self.tableWidget_results.horizontalHeaderItem(3)\n item.setText(_translate(\"MainWindow\", \"BBox\"))" }, { "identifier": "FileProcessThread", "path": "src/qt/video/video_worker.py", "snippet": "class FileProcessThread(QThread):\n send_thread_start_finish_flag = pyqtSignal(str)\n send_video_info = pyqtSignal(dict)\n send_ai_output = pyqtSignal(list)\n send_display_frame = pyqtSignal(QImage)\n send_play_progress = pyqtSignal(int)\n def __init__(self):\n super(FileProcessThread, self).__init__()\n self.thread_name = \"FileProcessThread\"\n self.threadFlag = False\n \n def set_start_config(self, video_path, ai_task, screen_size, model_name=\"yolov8n\", confidence_threshold=0.35, iou_threshold=0.45, frame_interval=0):\n self.threadFlag = True\n self.video_path = video_path\n self.ai_task = ai_task\n self.pause_process = False\n self.confi_thr = confidence_threshold\n self.iou_thr = iou_threshold\n self.model_name = model_name\n self.frame_interval = frame_interval\n self.get_screen_size(screen_size)\n self._init_yolo()\n self._init_tracker()\n\n def set_iou_threshold(self, iou_threshold):\n self.iou_thr = iou_threshold\n \n def set_confidence_threshold(self, confidence_threshold):\n self.confi_thr = confidence_threshold\n \n def set_model_name(self, model_name):\n self.model_name = model_name\n \n def set_frame_interval(self, frame_interval):\n self.frame_interval = frame_interval\n \n def get_screen_size(self, screen_size):\n self.iw, self.ih = screen_size\n\n def _init_yolo(self):\n if self.ai_task == \"object_detection\":\n self.detector = YoloDetector()\n self.detector.init(\n model_path=os.path.join(ROOT, f\"weights/detection/{self.model_name}.onnx\"),\n class_txt_path=os.path.join(ROOT, \"weights/classes.txt\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n elif self.ai_task == \"pose_detection\":\n self.pose_detector = PoseDetector()\n self.pose_detector.init(\n model_path=os.path.join(ROOT, f\"weights/pose/{self.model_name}-pose.onnx\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n elif self.ai_task == \"segmentation\":\n self.seg_detector = YOLOSeg()\n self.seg_detector.init(\n model_path=os.path.join(ROOT, f\"weights/segmentation/{self.model_name}-seg.onnx\"),\n class_txt_path=os.path.join(ROOT, \"weights/classes.txt\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n\n def _init_tracker(self):\n self.tracker = DeepSort(\n model_path=os.path.join(ROOT, f\"src/models/tracking/deep_sort/deep/checkpoint/ckpt.t7\"))\n \n def stop_process(self):\n self.threadFlag = False\n \n def toggle_play_pause(self):\n self.pause_process = not self.pause_process\n \n def run(self):\n self.send_thread_start_finish_flag.emit(\"processing_on_file\")\n media_fmt = self.check_image_or_video(self.video_path)\n cap = cv.VideoCapture(self.video_path)\n if not cap.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_info = self.get_video_info(cap)\n self.send_video_info.emit(video_info)\n\n model_output = []\n frame_id = 1\n while self.threadFlag:\n if self.pause_process:\n continue\n ret, frame = cap.read()\n if ret is False:\n break\n\n if frame_id % 
int(self.frame_interval+1) == 0:\n model_output = []\n if self.ai_task == \"object_detection\":\n model_output = self.detector.inference(frame, self.confi_thr, self.iou_thr)\n elif self.ai_task == \"pose_detection\":\n model_output = self.pose_detector.inference(frame, self.confi_thr, self.iou_thr)\n elif self.ai_task == \"segmentation\":\n model_output = self.seg_detector.inference(frame, self.confi_thr, self.iou_thr)\n \n if media_fmt == \"video\":\n model_output = self.tracker.update(\n detection_results=model_output,\n ori_img=frame)\n model_output = add_image_id(model_output, frame_id)\n frame = draw_results(frame, model_output)\n display_frame = self.convert_cv_qt(frame, self.ih, self.iw)\n\n self.send_display_frame.emit(display_frame)\n self.send_play_progress.emit(int(frame_id/video_info[\"length\"]*1000))\n self.send_ai_output.emit(model_output)\n frame_id += 1\n cap.release()\n if media_fmt == \"video\":\n blank_image = np.zeros((self.ih, self.iw, 3))\n blank_image = cv.cvtColor(blank_image.astype('uint8'), cv.COLOR_BGR2RGBA)\n show_image = QImage(blank_image.data, blank_image.shape[1], blank_image.shape[0], QImage.Format.Format_RGBA8888)\n self.send_display_frame.emit(show_image)\n self.send_ai_output.emit([])\n self.send_thread_start_finish_flag.emit(\"waiting_for_setting\")\n \n def get_video_info(self, video_cap):\n video_info = {}\n video_info[\"FPS\"] = video_cap.get(cv.CAP_PROP_FPS)\n video_info[\"length\"] = int(video_cap.get(cv.CAP_PROP_FRAME_COUNT))\n video_info[\"size\"] = (int(video_cap.get(cv.CAP_PROP_FRAME_WIDTH)),int(video_cap.get(cv.CAP_PROP_FRAME_HEIGHT)))\n return video_info\n\n def check_image_or_video(self, media_path):\n img_fm = (\".tif\", \".tiff\", \".jpg\", \".jpeg\", \".gif\", \".png\", \".eps\", \".raw\", \".cr2\", \".nef\", \".orf\", \".sr2\", \".bmp\", \".ppm\", \".heif\")\n vid_fm = (\".flv\", \".avi\", \".mp4\", \".3gp\", \".mov\", \".webm\", \".ogg\", \".qt\", \".avchd\")\n media_fms = {\"image\": img_fm, \"video\": vid_fm}\n if any(media_path.lower().endswith(media_fms[\"image\"]) for ext in media_fms[\"image\"]):\n return \"image\"\n elif any(media_path.lower().endswith(media_fms[\"video\"]) for ext in media_fms[\"video\"]):\n return \"video\"\n else:\n raise TypeError(\"Please select an image or video\")\n \n def convert_cv_qt(self, image, screen_height, screen_width):\n h, w, _ = image.shape\n scale = min(screen_width / w, screen_height / h)\n nw, nh = int(scale * w), int(scale * h)\n image_resized = cv.resize(image, (nw, nh))\n image_paded = np.full(shape=[screen_height, screen_width, 3], fill_value=0)\n dw, dh = (screen_width - nw) // 2, (screen_height - nh) // 2\n image_paded[dh:nh + dh, dw:nw + dw, :] = image_resized\n image_paded = cv.cvtColor(image_paded.astype('uint8'), cv.COLOR_BGR2RGBA)\n return QImage(image_paded.data, image_paded.shape[1], image_paded.shape[0], QImage.Format.Format_RGBA8888)" } ]
from src.qt.stream.video_capture import CameraCaptureThread from src.qt.stream.visualize import VideoVisualizationThread from src.qt.stream.ai_worker import AiWorkerThread from src.ui.main_window import Ui_MainWindow from src.qt.video.video_worker import FileProcessThread from PyQt6 import QtGui, QtWidgets from PyQt6.QtCore import Qt import sys import numpy as np
12,220
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow): def __init__(self, parent=None): super(MainWindow, self).__init__(parent) self.setupUi(self)
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow): def __init__(self, parent=None): super(MainWindow, self).__init__(parent) self.setupUi(self)
self.ai_thread = AiWorkerThread()
2
2023-10-18 09:21:01+00:00
16k
S-LoRA/S-LoRA
slora/server/router/manager.py
[ { "identifier": "SamplingParams", "path": "slora/server/sampling_params.py", "snippet": "class SamplingParams:\n\n def __init__(\n self,\n do_sample: bool = False,\n presence_penalty: float = 0.0,\n frequency_penalty: float = 0.0,\n temperature: float = 1.0,\n top_p: float = 1.0,\n top_k: int = -1, # -1 is for all \n ignore_eos: bool = False,\n max_new_tokens: int = 16,\n stop_sequences: Optional[Union[str, List[str]]] = None # 停止句子条件\n ) -> None:\n self.do_sample = do_sample\n self.presence_penalty = presence_penalty\n self.frequency_penalty = frequency_penalty\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.ignore_eos = ignore_eos\n self.max_new_tokens = max_new_tokens\n self.stop_sequences = stop_sequences\n if self.do_sample == False:\n self.temperature = 1.0\n self.top_p = 1.0\n self.top_k = 1\n if self.temperature >= 0.0 and self.temperature < _SAMPLING_EPS: # temperature is too slow, change to greedy search\n self.temperature = 1.0\n self.top_k = 1\n return\n \n def verify(self):\n if self.presence_penalty < 0.0:\n raise ValueError(f\"presence_penalty must >= 0.0, got {self.presence_penalty}\")\n if self.frequency_penalty < 0.0:\n raise ValueError(f\"frequency_penalty must >= 0.0, got {self.frequency_penalty}\")\n if self.temperature <= 0.0:\n raise ValueError(f\"temperature must > 0.0, got {self.temperature}\")\n if self.top_p <= 0.0 or self.top_p > 1.0:\n raise ValueError(f\"top_p must in (0.0, 1.0], got {self.top_p}\")\n if self.top_k < -1 or self.top_k == 0:\n raise ValueError(f\"top_k must be -1 (disable), or at least 1, got {self.top_k}.\")\n if self.max_new_tokens < 1:\n raise ValueError(f\"max_new_tokens must be at least 1 , got {self.max_new_tokens}.\")\n return\n\n def stop_sentences_to_token_ids(self, tokenizer):\n if self.stop_sequences is None:\n self.stop_sequences = []\n else:\n if isinstance(self.stop_sequences, str):\n self.stop_sequences = [self.stop_sequences]\n new_stop_sequences = []\n for stop_str in self.stop_sequences:\n stop_str_ids = tokenizer.encode(stop_str)\n if stop_str_ids is not None and len(stop_str_ids) >= 1: # remove bos_token_id\n stop_str_ids = stop_str_ids[1:]\n if len(stop_str_ids) > 0:\n new_stop_sequences.append(stop_str_ids)\n self.stop_sequences = new_stop_sequences\n return\n \n def to_dict(self):\n ret = {}\n ret[\"do_sample\"] = self.do_sample\n ret[\"presence_penalty\"] = self.presence_penalty\n ret[\"frequency_penalty\"] = self.frequency_penalty\n ret[\"temperature\"] = self.temperature\n ret[\"top_p\"] = self.top_p\n ret[\"top_k\"] = self.top_k\n # if self.ignore_eos is not None:\n # ret[\"ignore_eos\"] = self.ignore_eos\n # if self.max_tokens is not None:\n # ret[\"max_tokens\"] = self.max_tokens\n return ret" }, { "identifier": "Req", "path": "slora/server/io_struct.py", "snippet": "class Req:\n def __init__(self, adapter_dir, request_id, prompt_ids, sample_params: SamplingParams):\n self.adapter_dir = adapter_dir\n self.request_id = request_id\n self.prompt_ids = prompt_ids\n self.input_len = len(prompt_ids)\n self.max_output_len = sample_params.max_new_tokens\n self.sample_params = sample_params\n self.output_ids = []\n self.output_metadata_list = []\n self.has_generate_finished = False\n self.aborted = False\n\n def to_rpc_obj(self):\n return {\"adapter_dir\": self.adapter_dir,\n \"request_id\": self.request_id,\n \"input_id\": self.prompt_ids,\n \"output_len\": self.max_output_len,\n \"sampling_param\": self.sample_params.to_dict() }\n\n def to_req_detokenization_state(self):\n out = 
ReqDetokenizationState(self.request_id, self.prompt_ids, self.max_output_len, self.sample_params.ignore_eos)\n if self.output_metadata_list:\n out.gen_metadata.update(self.output_metadata_list[-1])\n return out\n \n def stop_sequences_matched(self):\n for stop_token_ids in self.sample_params.stop_sequences:\n stop_len = len(stop_token_ids)\n if stop_len > 0:\n if len(self.output_ids) >= stop_len:\n if all(self.output_ids[-(stop_len - i)] == stop_token_ids[i] for i in range(stop_len)):\n return True\n return False\n\n def __repr__(self):\n return (f\"request_id(n={self.request_id}, \"\n f\"adapter_dir={self.adapter_dir}, \")\n # f\"prompt_ids={self.prompt_ids}, \")" }, { "identifier": "Batch", "path": "slora/server/io_struct.py", "snippet": "class Batch:\n def __init__(self, batch_id, reqs: List[Req]):\n self.batch_id = batch_id\n self.reqs = reqs\n self.id_to_reqs = {req.request_id: req for req in reqs}\n\n self.adapter_dirs = set()\n for req in reqs:\n self.adapter_dirs.add(req.adapter_dir)\n\n def input_tokens(self):\n batch_input_tokens = 0\n for req in self.reqs:\n batch_input_tokens += req.input_len\n return batch_input_tokens\n\n def calcu_max_tokens(self):\n tokens = 0\n for req in self.reqs:\n tokens += req.input_len + req.max_output_len\n return tokens\n \n def calcu_used_tokens(self):\n tokens = 0\n for req in self.reqs:\n tokens += req.input_len + len(req.output_ids)\n return tokens\n\n def mark_finished_req(self, eos_id):\n has_new_finish = False\n for req in self.reqs:\n if req.stop_sequences_matched():\n req.has_generate_finished = True\n has_new_finish = True\n if req.output_ids[-1] == eos_id and req.sample_params.ignore_eos == False:\n req.has_generate_finished = True\n has_new_finish = True\n if len(req.output_ids) >= req.max_output_len or req.aborted:\n req.has_generate_finished = True\n has_new_finish = True\n return has_new_finish\n\n def filter_finished(self):\n unfinished_req = []\n for req in self.reqs:\n if not req.has_generate_finished:\n unfinished_req.append(req)\n self.reqs = unfinished_req\n self.id_to_reqs = {req.request_id: req for req in self.reqs}\n\n self.adapter_dirs = set()\n for req in self.reqs:\n self.adapter_dirs.add(req.adapter_dir)\n\n def is_clear(self):\n return len(self.reqs) == 0\n\n def merge(self, mini_batch):\n for _req in mini_batch.reqs:\n self.reqs.append(_req)\n self.adapter_dirs.add(_req.adapter_dir)\n self.id_to_reqs = {req.request_id: req for req in self.reqs}\n return\n\n def __repr__(self):\n return (f\"batch_id={self.batch_id}, \"\n # f\"reqs={self.reqs}, \"\n f\"req_ids={self.id_to_reqs.keys()}\")" }, { "identifier": "BatchAbortReq", "path": "slora/server/io_struct.py", "snippet": "class BatchAbortReq:\n def __init__(self, req_ids):\n self.reqs: List[str] = req_ids" }, { "identifier": "start_model_process", "path": "slora/server/router/model_infer/model_rpc.py", "snippet": "async def start_model_process(port, world_size):\n # 单卡时不使用 rpc\n if world_size == 1:\n return ModelRpcClient(ModelRpcServer(), world_size)\n \n import multiprocessing\n proc = multiprocessing.Process(target=_init_env, args=(port,))\n proc.start()\n await asyncio.sleep(2)\n repeat_count = 0\n while repeat_count < 20:\n try:\n con = rpyc.connect(\"localhost\", port, config={\"allow_pickle\": True})\n break\n except BaseException:\n await asyncio.sleep(1)\n repeat_count += 1\n if repeat_count == 20:\n raise Exception(\"init rpc env error!\")\n\n assert proc.is_alive()\n return ModelRpcClient(con.root, world_size, rpc_server_process=proc)" }, { "identifier": 
"ModelRpcClient", "path": "slora/server/router/model_infer/model_rpc.py", "snippet": "class ModelRpcClient:\n def __init__(self, model_rpc, world_size, rpc_server_process=None):\n self.model: ModelRpcServer = model_rpc\n self.world_size = world_size\n self.rpc_server_process = rpc_server_process\n self.use_rpc = self.world_size != 1\n if self.use_rpc:\n def async_wrap(f):\n f = rpyc.async_(f)\n async def _func(*args, **kwargs):\n ans = f(*args, **kwargs)\n await asyncio.to_thread(ans.wait)\n # raise if exception\n return ans.value\n return _func\n self._init_model = async_wrap(self.model.init_model)\n self._load_adapters = rpyc.async_(self.model.load_adapters)\n self._offload_adapters = rpyc.async_(self.model.offload_adapters)\n self._unmerge_adapter = rpyc.async_(self.model.unmerge_adapter)\n self._merge_adapter = rpyc.async_(self.model.merge_adapter)\n self._add_batch = async_wrap(self.model.add_batch)\n self._prefill_batch = async_wrap(self.model.prefill_batch)\n self._decode_batch = async_wrap(self.model.decode_batch)\n self._filter_batch = async_wrap(self.model.filter_batch)\n self._merge_batch = async_wrap(self.model.merge_batch)\n self._remove_batch = async_wrap(self.model.remove_batch)\n self._profile_prefill = async_wrap(self.model.profile_prefill)\n else:\n self._init_model = self.model.exposed_init_model\n self._load_adapters = self.model.exposed_load_adapters\n self._offload_adapters = self.model.exposed_offload_adapters\n self._merge_adapter = self.model.exposed_merge_adapter\n self._unmerge_adapter = self.model.exposed_unmerge_adapter\n self._add_batch = self.model.exposed_add_batch\n self._prefill_batch = self.model.exposed_prefill_batch\n self._decode_batch = self.model.exposed_decode_batch\n self._filter_batch = self.model.exposed_filter_batch\n self._merge_batch = self.model.exposed_merge_batch\n self._remove_batch = self.model.exposed_remove_batch\n self._profile_prefill = self.model.exposed_profile_prefill\n return\n\n async def init_model(self, rank_id, world_size, weight_dir, adapter_dirs,\n max_total_token_num, load_way, mode, input_params,\n\t\t\t prefetch_stream):\n ans : rpyc.AsyncResult = self._init_model(rank_id, world_size, weight_dir, adapter_dirs,\n max_total_token_num, load_way, mode, input_params,\n\t\t\t\t\t\t prefetch_stream)\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n\n async def load_adapters(self, reqs, prefetch=False):\n self._load_adapters(reqs, prefetch=prefetch)\n\n\n async def offload_adapters(self, reserved_reqs=None, prefetch=False):\n self._offload_adapters(reserved_reqs, prefetch=prefetch)\n \n async def unmerge_adapter(self):\n self._unmerge_adapter()\n \n async def merge_adapter(self):\n self._merge_adapter()\n\n\n async def init_batch(self, batch_id, reqs):\n ans = self._add_batch(batch_id, reqs, \"fp16\")\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n async def prefill_batch(self, batch_id):\n ans = self._prefill_batch(batch_id)\n if self.use_rpc:\n return await ans\n else:\n return ans\n\n async def decode_batch(self, batch_id):\n ans = self._decode_batch(batch_id)\n if self.use_rpc:\n return await ans\n else:\n return ans\n\n async def filter_batch(self, batch_id, req_id_list):\n ans = self._filter_batch(batch_id, req_id_list)\n if self.use_rpc:\n await ans\n return\n else:\n return \n\n async def merge_batch(self, batch_id1, batch_id2):\n ans = self._merge_batch(batch_id1, batch_id2)\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n async def remove_batch(self, batch_id):\n ans = 
self._remove_batch(batch_id)\n if self.use_rpc:\n await ans\n return\n else:\n return\n \n async def profile_prefill(self):\n ans = self._profile_prefill()\n if self.use_rpc:\n return await ans\n else:\n return ans" }, { "identifier": "ReqQueue", "path": "slora/server/router/req_queue.py", "snippet": "class ReqQueue:\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n self.max_total_tokens = max_total_tokens\n assert batch_max_tokens is not None\n self.batch_max_tokens = batch_max_tokens\n self.running_max_req_size = running_max_req_size\n self.waiting_req_list: List[Req] = []\n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n \n def update_counter(self, req):\n pass \n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "calculate_time", "path": "slora/utils/infer_utils.py", "snippet": "def calculate_time(show=False, 
min_cost_ms=0.0):\n def wrapper(func):\n def inner_func(*args, **kwargs):\n torch.cuda.synchronize()\n if show:\n start_time = time.time()\n result = func(*args, **kwargs)\n torch.cuda.synchronize()\n if show:\n cost_time = (time.time() - start_time) * 1000\n if cost_time > min_cost_ms:\n print(f\"Function {func.__name__} took {cost_time} ms to run.\")\n return result\n\n return inner_func\n\n return wrapper" }, { "identifier": "BatchTokenIdOut", "path": "slora/server/io_struct.py", "snippet": "class BatchTokenIdOut:\n def __init__(self):\n self.reqs_infs: List[Tuple[str, int, Dict, bool, bool]] = [] # [req_id, new_token_id, gen_metadata, finished_state, abort_state]" }, { "identifier": "AbortReq", "path": "slora/server/io_struct.py", "snippet": "class AbortReq:\n def __init__(self, req_id):\n self.req_id = req_id" }, { "identifier": "Stats", "path": "slora/server/router/stats.py", "snippet": "class Stats:\n\n def __init__(self, log_status, log_stats_interval) -> None:\n self.log_stats = log_status\n self.log_stats_interval = log_stats_interval\n self.last_log_time = time.time()\n self.all_tokens = 0\n self.output_tokens = 0\n self.prompt_tokens = 0\n return\n \n def count_prompt_tokens(self, run_batch):\n if self.log_stats:\n tokens = run_batch.input_tokens()\n self.prompt_tokens += tokens\n self.all_tokens += tokens\n return\n \n def count_output_tokens(self, run_batch):\n if self.log_stats:\n tokens = len(run_batch.reqs)\n self.output_tokens += tokens\n self.all_tokens += tokens\n return\n\n def print_stats(self):\n if not self.log_stats:\n return\n\n now = time.time()\n if now - self.last_log_time > self.log_stats_interval:\n print(f\"Avg tokens(prompt+generate) throughput: {self.all_tokens/(now-self.last_log_time):8.3f} tokens/s\\n\"\n f\"Avg prompt tokens throughput: {self.prompt_tokens/(now-self.last_log_time):8.3f} tokens/s\\n\"\n f\"Avg generate tokens throughput: {self.output_tokens/(now-self.last_log_time):8.3f} tokens/s\")\n self.all_tokens = 0\n self.output_tokens = 0\n self.prompt_tokens = 0\n self.last_log_time = now\n return" }, { "identifier": "InputParams", "path": "slora/server/input_params.py", "snippet": "class InputParams:\n\n def __init__(\n self,\n max_req_total_len,\n # kv cache manager parameters\n max_total_token_num,\n pool_size_lora,\n batch_max_tokens,\n running_max_req_size,\n # mem_ratio,\n # adapter_ratio,\n # heuristic\n swap,\n prefetch,\n prefetch_size,\n scheduler,\n profile,\n batch_num_adapters,\n enable_abort,\n # kernel,\n # # debug\n dummy,\n no_lora_compute,\n no_lora_swap,\n # no_lora_copy,\n no_kernel,\n no_mem_pool,\n bmm,\n no_lora,\n # fairness\n fair_weights,\n ) -> None:\n self.max_req_total_len = max_req_total_len\n self.max_total_token_num = max_total_token_num\n self.pool_size_lora = pool_size_lora\n self.batch_max_tokens = batch_max_tokens\n self.running_max_req_size = running_max_req_size\n # self.mem_ratio = mem_ratio\n # self.adapter_ratio = adapter_ratio\n\n self.swap = swap\n self.prefetch = prefetch\n self.prefetch_size = prefetch_size\n self.scheduler = scheduler\n self.profile = profile\n self.batch_num_adapters = batch_num_adapters\n self.enable_abort = enable_abort\n # self.kernel = kernel\n\n self.dummy = dummy\n self.no_lora_compute = no_lora_compute\n self.no_lora_swap = no_lora_swap\n # self.no_lora_copy = no_lora_copy\n self.no_kernel = no_kernel\n self.no_mem_pool = no_mem_pool\n self.bmm = bmm\n self.no_lora = no_lora\n \n self.fair_weights = fair_weights\n return" }, { "identifier": "get_lora_config", "path": 
"slora/models/peft/lora_adapter.py", "snippet": "def get_lora_config(lora_dir, dummy):\n if dummy:\n return get_lora_config_json(lora_dir), lora_dir\n else:\n lora_dir = re.sub(r'-(\\d+)$', '', lora_dir)\n return hf_load_config(lora_dir)" }, { "identifier": "AlphaModel", "path": "slora/server/router/profiler.py", "snippet": "class AlphaModel:\n def __init__(self, profiling_results) -> None:\n self.base_prefill = profiling_results[0]\n print(self.base_prefill)\n \n # load from .pkl file\n @classmethod\n def from_file(cls, file_path):\n with open(file_path, \"rb\") as f:\n results = pickle.load(f)\n return cls(results)\n\n def get_latency(self, batch_size, seq_len):\n seq_len = math.ceil(seq_len / 32) * 32\n assert seq_len <= 1024\n if batch_size == 0: return 0\n # assert batch_size in self.base_prefill\n if batch_size in self.base_prefill:\n return self.base_prefill[batch_size][seq_len]\n elif batch_size == 1 and 2 in self.base_prefill:\n return self.base_prefill[2][seq_len]\n elif batch_size % 2 != 0 and batch_size - 1 in self.base_prefill and batch_size + 1 in self.base_prefill:\n return (self.base_prefill[batch_size - 1][seq_len] + self.base_prefill[batch_size + 1][seq_len]) / 2\n else:\n return np.Inf" }, { "identifier": "BetaModel", "path": "slora/server/router/profiler.py", "snippet": "class BetaModel:\n def __init__(self, profiling_results) -> None:\n self.base_prefill = profiling_results[0]\n self.adapter_prefill = profiling_results[1]\n print(self.adapter_prefill)\n \n # load from .pkl file\n @classmethod\n def from_file(cls, file_path):\n with open(file_path, \"rb\") as f:\n results = pickle.load(f)\n return cls(results)\n \n def get_latency(self, rank_size, batch_size, seq_len):\n if rank_size == 0: return 0\n seq_len = math.ceil(seq_len / 32) * 32\n assert seq_len <= 1024\n if batch_size == 0: return 0\n # assert batch_size in self.base_prefill\n if batch_size in self.base_prefill:\n return self.adapter_prefill[rank_size][batch_size][seq_len] - self.base_prefill[batch_size][seq_len]\n elif batch_size == 1 and 2 in self.base_prefill:\n return self.adapter_prefill[rank_size][2][seq_len] - self.base_prefill[2][seq_len]\n elif batch_size % 2 != 0 and batch_size - 1 in self.base_prefill and batch_size + 1 in self.base_prefill:\n a = self.adapter_prefill[rank_size][batch_size - 1][seq_len] - self.base_prefill[batch_size - 1][seq_len]\n b = self.adapter_prefill[rank_size][batch_size + 1][seq_len] - self.base_prefill[batch_size + 1][seq_len]\n return (a + b) / 2\n else:\n return np.Inf" }, { "identifier": "AbortReqQueue", "path": "slora/server/router/abort_req_queue.py", "snippet": "class AbortReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.abort_req_list: List[str] = []\n self.req_time_stamp = []\n self.init_bs = 1\n self.apprx_req_rate = 1\n self.apprx_bs = self.init_bs\n self.last_req_num = 0\n self.last_time = time.time()\n \n def append(self, req):\n self.waiting_req_list.insert(0, req)\n self.req_time_stamp.insert(0, time.time())\n assert len(self.waiting_req_list) == len(self.req_time_stamp)\n return\n\n def reset_abort_list(self):\n self.abort_req_list = []\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n 
new_batch_total_tokens = 0\n aborted_count = 0\n\n self.apprx_req_rate = int(0.7 * (len(self.waiting_req_list) - self.last_req_num) + 0.3 * self.apprx_req_rate)\n for i, req in enumerate(self.waiting_req_list):\n if attainment_func(time.time() - self.req_time_stamp[i] + 0.5) == 0:\n req.aborted = True\n aborted_count += 1\n abort_list.append(req)\n self.abort_req_list.append(req.request_id)\n self.req_time_stamp = [self.req_time_stamp[i] for i in range(len(self.req_time_stamp)) if self.waiting_req_list[i] not in abort_list]\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in abort_list]\n \n if self.apprx_req_rate >= self.apprx_bs:\n print(\"apprx bs\", self.apprx_bs, \"req rate\", self.apprx_req_rate)\n # choose from the latest requests\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n elif self.apprx_req_rate < self.apprx_bs:\n # choose from the earliest requests\n for req in reversed(self.waiting_req_list):\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n \n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.req_time_stamp = [self.req_time_stamp[i] for i in range(len(self.req_time_stamp)) if self.waiting_req_list[i] not in can_run_list and self.waiting_req_list[i] not in abort_list]\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n self.last_req_num = len(self.waiting_req_list)\n self.apprx_bs = max(int(0.7 * len(new_batch.reqs) + 0.3 * self.apprx_bs), self.init_bs)\n return new_batch\n else:\n return None" }, { "identifier": "ClusterReqQueue", "path": "slora/server/router/cluster_req_queue.py", "snippet": "class ClusterReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size, batch_num_adapters) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.batch_num_adapters = batch_num_adapters\n\n def _generate_new_batch_prioritizing_existing_adapters(self, current_batch:Batch, lora_ranks: dict[str, int]):\n filtered_waiting_req_list = list(filter(lambda req: req.adapter_dir in current_batch.adapter_dirs, self.waiting_req_list))\n request_ids_to_remove_from_waiting_queue = set()\n can_run_list = []\n new_batch_total_tokens = 0\n for idx, req in enumerate(filtered_waiting_req_list):\n if req.aborted:\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n else:\n break\n \n self.waiting_req_list = list(filter(lambda req: req.request_id not in request_ids_to_remove_from_waiting_queue, self.waiting_req_list))\n request_ids_to_remove_from_waiting_queue = set()\n\n # If filtered waiting list was not enough to max-out the current running batch, we resolve to FIFO\n for req in self.waiting_req_list:\n if req.aborted:\n 
request_ids_to_remove_from_waiting_queue.add(req.request_id)\n continue\n\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n else:\n break\n \n self.waiting_req_list = list(filter(lambda req: req.request_id not in request_ids_to_remove_from_waiting_queue, self.waiting_req_list))\n\n return can_run_list\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n continue\n\n if current_batch is not None and len(current_batch.adapter_dirs) >= self.batch_num_adapters:\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n rest_of_batch = self._generate_new_batch_prioritizing_existing_adapters(current_batch, lora_ranks)\n can_run_list += rest_of_batch\n if len(can_run_list) != 0:\n return Batch(uuid.uuid4().hex, can_run_list)\n else:\n return None\n\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n return new_batch\n else:\n return None" }, { "identifier": "VTCReqQueue", "path": "slora/server/router/vtc_req_queue.py", "snippet": "class VTCReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size,\n adapter_dirs, fair_weights,\n input_price=1, output_price=2) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.input_price = input_price\n self.output_price = output_price\n self.served = {}\n self.user_req_list = {}\n\n self.adapter_dirs = adapter_dirs\n self.fair_weights = fair_weights\n\n self.fairw = {}\n for i in range(len(adapter_dirs)):\n if i < len(fair_weights):\n self.fairw[adapter_dirs[i]] = fair_weights[i]\n else:\n self.fairw[adapter_dirs[i]] = 1\n \n \n def append(self, req):\n self.waiting_req_list.append(req)\n if req.adapter_dir not in self.user_req_list:\n self.user_req_list[req.adapter_dir] = deque([req])\n self.served[req.adapter_dir] = 0\n else:\n self.user_req_list[req.adapter_dir].append(req)\n\n # waiting queue was empty before\n if len(self.user_req_list[req.adapter_dir]) == 1:\n # lift counter\n cnts = [v for k, v in self.served.items()\n if (len(self.user_req_list[k]) > 0 and k != req.adapter_dir)]\n if len(cnts) > 0:\n self.served[req.adapter_dir] = max(self.served[req.adapter_dir], min(cnts))\n\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 
0\n\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n if len(self.served) == 0:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n active_served = {k: v for k, v in self.served.items()}\n while True:\n if len(active_served) == 0:\n break\n adapter_dir = min(active_served, key=active_served.get)\n if len(self.user_req_list[adapter_dir]) > 0:\n req = self.user_req_list[adapter_dir][0]\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n self.user_req_list[adapter_dir].popleft()\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n self.user_req_list[adapter_dir].popleft()\n # update fairness counter\n self.served[adapter_dir] += req.input_len * self.input_price / self.fairw[adapter_dir]\n active_served[adapter_dir] += req.input_len * self.input_price / self.fairw[adapter_dir]\n else:\n break\n else:\n del active_served[adapter_dir]\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list\n if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n \n def update_counter(self, current_batch: Batch):\n for req in current_batch.reqs:\n self.served[req.adapter_dir] += 1 * self.output_price / self.fairw[req.adapter_dir]\n\n\n def next_batch(self):\n raise NotImplementedError()" }, { "identifier": "PETSReqQueue", "path": "slora/server/router/pets_req_queue.py", "snippet": "class PETSReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.alpha = None\n self.beta = None # will be set automatically in the profiling function in router.manager\n \n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n 
self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n def intra_task_batching(self, lora_ranks):\n ## Preprocessing: gather the queries with the same adapter.\n clustered_queries_by_adapter = {}\n for query in self.waiting_req_list:\n adapter_dir = query.adapter_dir\n if adapter_dir in clustered_queries_by_adapter:\n clustered_queries_by_adapter[adapter_dir].append(query)\n else:\n clustered_queries_by_adapter[adapter_dir] = [query]\n\n ## DP\n mini_batches = []\n for adapter_dir, queries in clustered_queries_by_adapter.items():\n state_1st_stage = []\n split_idx_list = []\n\n ### Sort queries according to the sequence length in ascending order.\n queries = sorted(queries, key=lambda x: x.input_len)\n queries.insert(0, None) # Sentinel.\n\n ### Initialize.\n state_1st_stage.append(0)\n split_idx_list.append(0)\n for j in range(1, len(queries)):\n min_cost = np.Inf # INF\n split_idx = 0\n for k in range(1, j+1):\n lora_rank = lora_ranks[adapter_dir]\n tmp = state_1st_stage[k-1] + self.beta.get_latency(lora_rank, j-k+1, queries[j].input_len)\n if tmp < min_cost:\n min_cost = tmp\n split_idx = k-1\n split_idx_list.append(split_idx)\n state_1st_stage.append(min_cost)\n \n ### Split queries into mini-batches according to split_idx_list.\n \n end_idx = len(queries) - 1\n\n while(end_idx > 0):\n start_idx = split_idx_list[end_idx] + 1\n mini_batch = []\n max_len = queries[end_idx].input_len\n for j in range(start_idx, end_idx + 1):\n mini_batch.append(queries[j]) \n mini_batches.append((mini_batch, max_len))\n end_idx = split_idx_list[end_idx] \n \n return mini_batches\n \n # Inter-task batching.\n def inter_task_batching(self, mini_batches):\n ## Sort mini_batches according to the max sequence length.\n mini_batches = sorted(mini_batches, key=lambda x: x[1])\n mini_batches.insert(0, None) # Sentinel.\n\n tmp = 0\n mini_batch_sum = [0]\n for mini_batch in mini_batches[1:]:\n tmp += len(mini_batch[0])\n mini_batch_sum.append(tmp)\n\n ## DP.\n state_2nd_stage = []\n split_idx_list = []\n state_2nd_stage.append(0)\n split_idx_list.append(0)\n\n for i in range(1, len(mini_batches)):\n min_cost = np.Inf # INF\n split_idx = 0\n for j in range(1, i+1):\n total_samples = mini_batch_sum[i] - mini_batch_sum[j-1]\n tmp = state_2nd_stage[j-1] + self.alpha.get_latency(total_samples, mini_batches[i][1])\n if tmp < min_cost:\n min_cost = tmp\n split_idx = j - 1\n split_idx_list.append(split_idx)\n state_2nd_stage.append(min_cost)\n\n ## Split mini_batches into final scheduled_batches.\n ### Split mini_batches into macro_batches.\n\n end_idx = len(mini_batches) - 1\n macro_batches = []\n while(end_idx > 0):\n start_idx = split_idx_list[end_idx] + 1\n macro_batch = []\n max_len = mini_batches[end_idx][1]\n for j in range(start_idx, end_idx + 1):\n macro_batch.append(mini_batches[j]) \n macro_batches.append((macro_batch, max_len))\n end_idx = split_idx_list[end_idx] \n\n total_samples = 0\n for macro_batch in macro_batches:\n for mini_batch in macro_batch[0]:\n total_samples += len(mini_batch[0])\n # print(total_samples)\n\n return macro_batches\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = 
np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n reqs = self.waiting_req_list\n # when waiting_reqs > 20\n if len(self.waiting_req_list) > 10:\n mini_batches = self.intra_task_batching(lora_ranks)\n macro_batches = self.inter_task_batching(mini_batches)\n \n macro_batch = macro_batches[-1][0]\n reqs = [req for minibatch in macro_batch for req in minibatch[0]]\n \n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n for req in reqs:\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "PEFTReqQueue", "path": "slora/server/router/peft_req_queue.py", "snippet": "class PEFTReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for 
e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n if len(self.waiting_req_list) > 0 and current_batch is None:\n adapter_dir = self.waiting_req_list[0].adapter_dir\n if current_batch is not None:\n adapter_dir = current_batch.reqs[0].adapter_dir\n # heuristics:\n # TODO: think more\n max_other_waited_reqs = 30\n other_waited_reqs = 0\n for req in self.waiting_req_list:\n if req.adapter_dir != adapter_dir:\n other_waited_reqs += 1\n if other_waited_reqs > max_other_waited_reqs:\n return None\n continue\n if req.adapter_dir == adapter_dir:\n break\n\n for req in self.waiting_req_list:\n if req.adapter_dir != adapter_dir:\n continue\n if req.aborted:\n aborted_count += 1\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" } ]
import uvloop import asyncio import os import pickle import time import torch import zmq import zmq.asyncio import traceback from typing import Dict, List, Optional from ..sampling_params import SamplingParams from ..io_struct import Req, Batch, BatchAbortReq from .model_infer.model_rpc import start_model_process, ModelRpcClient from .req_queue import ReqQueue from rpyc.utils.classic import obtain from slora.utils.infer_utils import calculate_time from ..io_struct import BatchTokenIdOut, AbortReq from .stats import Stats from slora.server.input_params import InputParams from slora.models.peft.lora_adapter import get_lora_config from slora.server.router.profiler import AlphaModel, BetaModel from slora.server.router.abort_req_queue import AbortReqQueue from slora.server.router.cluster_req_queue import ClusterReqQueue from slora.server.router.vtc_req_queue import VTCReqQueue from slora.server.router.pets_req_queue import PETSReqQueue from slora.server.router.peft_req_queue import PEFTReqQueue
13,443
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) def get_scheduler(input_params, adapter_dirs): if input_params.scheduler == "vtc_fair": return VTCReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, adapter_dirs, input_params.fair_weights) elif input_params.scheduler == "pets": return PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": return PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None: return ClusterReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, input_params.batch_num_adapters) elif input_params.enable_abort: return AbortReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "slora": return ReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) else: raise Exception("unrecognized scheduler") class RouterManager: def __init__(self, weightdir, adapter_dirs, load_way, world_size, eos_id, router_port, detokenization_port, model_rpc_ports, input_params, mode=[], log_stats=True, log_stats_interval=10): self.model_weightdir = weightdir self.adapter_dirs = adapter_dirs self.world_size = world_size self.load_way = load_way self.mode = mode self.input_params = input_params if self.input_params.prefetch: self.prefetch_stream = torch.cuda.Stream() else: self.prefetch_stream = None # get adapter rank self.lora_ranks = {} for lora_dir in adapter_dirs: config, _ = get_lora_config(lora_dir, input_params.dummy) self.lora_ranks[lora_dir] = config["r"] self.lora_ranks[None] = 0 self.req_queue = get_scheduler(input_params, adapter_dirs) self.running_batch: Batch = None self.eos_id = eos_id self.has_wait_tokens = 0 self.max_wait_tokens = 10 context = zmq.asyncio.Context(2) self.recv_from_httpserver = context.socket(zmq.PULL) self.recv_from_httpserver.bind(f"tcp://127.0.0.1:{router_port}") self.send_to_detokenization = context.socket(zmq.PUSH) self.send_to_detokenization.connect(f"tcp://127.0.0.1:{detokenization_port}") self.model_rpc_ports = model_rpc_ports self.stats_tool = Stats(log_stats, log_stats_interval) async def wait_to_model_ready(self): self.model_rpcs: List[ModelRpcClient] = [] for rank_id in range(self.world_size):
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) def get_scheduler(input_params, adapter_dirs): if input_params.scheduler == "vtc_fair": return VTCReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, adapter_dirs, input_params.fair_weights) elif input_params.scheduler == "pets": return PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": return PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None: return ClusterReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, input_params.batch_num_adapters) elif input_params.enable_abort: return AbortReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "slora": return ReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) else: raise Exception("unrecognized scheduler") class RouterManager: def __init__(self, weightdir, adapter_dirs, load_way, world_size, eos_id, router_port, detokenization_port, model_rpc_ports, input_params, mode=[], log_stats=True, log_stats_interval=10): self.model_weightdir = weightdir self.adapter_dirs = adapter_dirs self.world_size = world_size self.load_way = load_way self.mode = mode self.input_params = input_params if self.input_params.prefetch: self.prefetch_stream = torch.cuda.Stream() else: self.prefetch_stream = None # get adapter rank self.lora_ranks = {} for lora_dir in adapter_dirs: config, _ = get_lora_config(lora_dir, input_params.dummy) self.lora_ranks[lora_dir] = config["r"] self.lora_ranks[None] = 0 self.req_queue = get_scheduler(input_params, adapter_dirs) self.running_batch: Batch = None self.eos_id = eos_id self.has_wait_tokens = 0 self.max_wait_tokens = 10 context = zmq.asyncio.Context(2) self.recv_from_httpserver = context.socket(zmq.PULL) self.recv_from_httpserver.bind(f"tcp://127.0.0.1:{router_port}") self.send_to_detokenization = context.socket(zmq.PUSH) self.send_to_detokenization.connect(f"tcp://127.0.0.1:{detokenization_port}") self.model_rpc_ports = model_rpc_ports self.stats_tool = Stats(log_stats, log_stats_interval) async def wait_to_model_ready(self): self.model_rpcs: List[ModelRpcClient] = [] for rank_id in range(self.world_size):
rpc_model = await start_model_process(port=self.model_rpc_ports[rank_id], world_size=self.world_size)
4
2023-11-05 04:08:36+00:00
16k
fleet-ai/context
cli.py
[ { "identifier": "print_markdown", "path": "utils/utils.py", "snippet": "def print_markdown(message):\n for line in message.split(\"\\n\"):\n line = line.strip()\n if line == \"\":\n print(\"\")\n elif line == \"---\":\n rprint(Rule(style=\"white\"))\n elif line.startswith(\"!!!\"):\n rprint(Text(line[3:], style=\"#D5D7FB\"))\n else:\n rprint(Markdown(line))\n\n if \"\\n\" not in message and message.startswith(\">\"):\n print(\"\")" }, { "identifier": "print_exception", "path": "utils/utils.py", "snippet": "def print_exception(exc_type, exc_value, traceback_obj):\n traceback_details = traceback.extract_tb(traceback_obj)\n for filename, lineno, funcname, text in traceback_details:\n console.print(\n f\"File: {filename}, Line: {lineno}, Func: {funcname}, Text: {text}\"\n )\n console.print(f\"{exc_type.__name__}: {exc_value}\")" }, { "identifier": "extract_code_blocks", "path": "utils/utils.py", "snippet": "def extract_code_blocks(message):\n pattern = r\"```python\\n(.*?)```\"\n matches = re.findall(pattern, message, re.DOTALL)\n return \"\\n\".join(matches)" }, { "identifier": "print_help", "path": "utils/utils.py", "snippet": "def print_help():\n table = Table(show_header=True, header_style=\"bold magenta\")\n table.add_column(\"Command\")\n table.add_column(\"Description\")\n\n # Add rows to the table for each command\n table.add_row(\"-k, --k_value\", \"Number of chunks to return\")\n table.add_row(\n \"-l, --libraries\",\n \"Limit your chat to a list of libraries. Usage: -l library1 library2 library3\",\n )\n table.add_row(\n \"-m, --model\", \"Specify the model. Default: gpt-4-1106-preview (gpt-4-turbo)\"\n )\n table.add_row(\n \"-c, --cite_sources\", \"Determines whether or not the AI model cites its sources\"\n )\n table.add_row(\"-h, --help\", \"Help\")\n\n # Create a panel with the table\n panel = Panel(table, title=\"Help\", border_style=\"blue\")\n\n # Print the panel\n rprint(panel)" }, { "identifier": "TextStream", "path": "utils/stream.py", "snippet": "class TextStream:\n def __init__(self):\n self.live = Live(console=Console(), auto_refresh=False)\n self.live.start()\n\n def print_stream(self, message):\n markdown = Markdown(message.strip() + \"●\")\n panel = Panel(markdown, box=MINIMAL)\n self.live.update(panel)\n self.live.refresh()\n\n def end_stream(self):\n self.live.stop()" }, { "identifier": "retrieve_context", "path": "utils/ai.py", "snippet": "def retrieve_context(query, k=10, filters=None):\n \"\"\"Gets the context from our libraries vector db for a given query.\n\n Args:\n query (str): User input query\n k (int, optional): number of retrieved results. 
Defaults to 10.\n \"\"\"\n\n # First, we query the API\n responses = retrieve(query, k=k, filters=filters)\n\n # Then, we build the prompt_with_context string\n prompt_with_context = \"\"\n for response in responses:\n prompt_with_context += f\"\\n\\n### Context {response['metadata']['url']} ###\\n{response['metadata']['text']}\"\n return {\"role\": \"user\", \"content\": prompt_with_context}" }, { "identifier": "construct_prompt", "path": "utils/ai.py", "snippet": "def construct_prompt(\n messages,\n context_message,\n model=\"gpt-4-1106-preview\",\n cite_sources=True,\n context_window=3000,\n):\n \"\"\"\n Constructs a RAG (Retrieval-Augmented Generation) prompt by balancing the token count of messages and context_message.\n If the total token count exceeds the maximum limit, it adjusts the token count of each to maintain a 1:1 proportion.\n It then combines both lists and returns the result.\n\n Parameters:\n messages (List[dict]): List of messages to be included in the prompt.\n context_message (dict): Context message to be included in the prompt.\n model (str): The model to be used for encoding, default is \"gpt-4-1106-preview\".\n\n Returns:\n List[dict]: The constructed RAG prompt.\n \"\"\"\n # Get the encoding; default to cl100k_base\n if model in OPENAI_MODELS:\n encoding = tiktoken.encoding_for_model(model)\n else:\n encoding = tiktoken.get_encoding(\"cl100k_base\")\n\n # 1) calculate tokens\n reserved_space = 1000\n max_messages_count = int((context_window - reserved_space) / 2)\n max_context_count = int((context_window - reserved_space) / 2)\n\n # 2) construct prompt\n prompts = messages.copy()\n prompts.insert(0, {\"role\": \"system\", \"content\": SYSTEM_PROMPT})\n if cite_sources:\n prompts.insert(-1, {\"role\": \"user\", \"content\": PROMPT})\n\n # 3) find how many tokens each list has\n messages_token_count = len(\n encoding.encode(\n \"\\n\".join(\n [\n f\"<|im_start|>{message['role']}\\n{message['content']}<|im_end|>\"\n for message in prompts\n ]\n )\n )\n )\n context_token_count = len(\n encoding.encode(\n f\"<|im_start|>{context_message['role']}\\n{context_message['content']}<|im_end|>\"\n )\n )\n\n # 4) Balance the token count for each\n if (messages_token_count + context_token_count) > (context_window - reserved_space):\n # context has more than limit, messages has less than limit\n if (messages_token_count < max_messages_count) and (\n context_token_count > max_context_count\n ):\n max_context_count += max_messages_count - messages_token_count\n # messages has more than limit, context has less than limit\n elif (messages_token_count > max_messages_count) and (\n context_token_count < max_context_count\n ):\n max_messages_count += max_context_count - context_token_count\n\n # 5) Cut each list to the max count\n\n # Cut down messages\n while messages_token_count > max_messages_count:\n removed_encoding = encoding.encode(\n f\"<|im_start|>{prompts[1]['role']}\\n{prompts[1]['content']}<|im_end|>\"\n )\n messages_token_count -= len(removed_encoding)\n if messages_token_count < max_messages_count:\n prompts = (\n [prompts[0]]\n + [\n {\n \"role\": prompts[1][\"role\"],\n \"content\": encoding.decode(\n removed_encoding[\n : min(\n int(max_messages_count -\n messages_token_count),\n len(removed_encoding),\n )\n ]\n )\n .replace(\"<|im_start|>\", \"\")\n .replace(\"<|im_end|>\", \"\"),\n }\n ]\n + prompts[2:]\n )\n else:\n prompts = [prompts[0]] + prompts[2:]\n\n # Cut down context\n if context_token_count > max_context_count:\n # Taking a proportion of the content chars 
length\n reduced_chars_length = int(\n len(context_message[\"content\"]) *\n (max_context_count / context_token_count)\n )\n context_message[\"content\"] = context_message[\"content\"][:reduced_chars_length]\n\n # 6) Combine both lists\n prompts.insert(-1, context_message)\n\n return prompts" }, { "identifier": "get_remote_chat_response", "path": "utils/ai.py", "snippet": "def get_remote_chat_response(messages, model=\"gpt-4-1106-preview\"):\n \"\"\"\n Returns a streamed OpenAI chat response.\n\n Parameters:\n messages (List[dict]): List of messages to be included in the prompt.\n model (str): The model to be used for encoding, default is \"gpt-4-1106-preview\".\n\n Returns:\n str: The streamed OpenAI chat response.\n \"\"\"\n client = OpenAI(api_key=os.environ.get(\"OPENAI_API_KEY\"))\n\n try:\n response = client.chat.completions.create(\n model=model, messages=messages, temperature=0.2, stream=True\n )\n\n for chunk in response:\n current_context = chunk.choices[0].delta.content\n yield current_context\n\n except openai.AuthenticationError as error:\n print(\"401 Authentication Error:\", error)\n raise Exception(\n \"Invalid OPENAI_API_KEY. Please re-run with a valid key.\")\n\n except Exception as error:\n print(\"Streaming Error:\", error)\n raise Exception(\"Internal Server Error\")" }, { "identifier": "get_other_chat_response", "path": "utils/ai.py", "snippet": "def get_other_chat_response(messages, model=\"local-model\"):\n \"\"\"\n Returns a streamed chat response from a local server.\n\n Parameters:\n messages (List[dict]): List of messages to be included in the prompt.\n model (str): The model to be used for encoding, default is \"gpt-4-1106-preview\".\n\n Returns:\n str: The streamed chat response.\n \"\"\"\n try:\n if model == \"local-model\":\n url = \"http://localhost:1234/v1/chat/completions\"\n headers = {\"Content-Type\": \"application/json\"}\n data = {\n \"messages\": messages,\n \"temperature\": 0.2,\n \"max_tokens\": -1,\n \"stream\": True,\n }\n response = requests.post(\n url, headers=headers, data=json.dumps(data), stream=True, timeout=120\n )\n\n if response.status_code == 200:\n for chunk in response.iter_content(chunk_size=None):\n decoded_chunk = chunk.decode()\n if (\n \"data:\" in decoded_chunk\n and decoded_chunk.split(\"data:\")[1].strip()\n ): # Check if the chunk is not empty\n try:\n chunk_dict = json.loads(\n decoded_chunk.split(\"data:\")[1].strip()\n )\n yield chunk_dict[\"choices\"][0][\"delta\"].get(\"content\", \"\")\n except json.JSONDecodeError:\n pass\n else:\n print(f\"Error: {response.status_code}, {response.text}\")\n raise Exception(\"Internal Server Error\")\n else:\n if not os.environ.get(\"OPENROUTER_API_KEY\"):\n raise Exception(\n f\"For non-OpenAI models, like {model}, set your OPENROUTER_API_KEY.\"\n )\n\n response = requests.post(\n url=\"https://openrouter.ai/api/v1/chat/completions\",\n headers={\n \"Authorization\": f\"Bearer {os.environ.get('OPENROUTER_API_KEY')}\",\n \"HTTP-Referer\": os.environ.get(\n \"OPENROUTER_APP_URL\", \"https://fleet.so/context\"\n ),\n \"X-Title\": os.environ.get(\"OPENROUTER_APP_TITLE\", \"Fleet Context\"),\n \"Content-Type\": \"application/json\",\n },\n data=json.dumps(\n {\"model\": model, \"messages\": messages, \"stream\": True}),\n stream=True,\n timeout=120,\n )\n if response.status_code == 200:\n for chunk in response.iter_lines():\n decoded_chunk = chunk.decode(\"utf-8\")\n if (\n \"data:\" in decoded_chunk\n and decoded_chunk.split(\"data:\")[1].strip()\n ): # Check if the chunk is not empty\n 
try:\n chunk_dict = json.loads(\n decoded_chunk.split(\"data:\")[1].strip()\n )\n yield chunk_dict[\"choices\"][0][\"delta\"].get(\"content\", \"\")\n except json.JSONDecodeError:\n pass\n else:\n print(f\"Error: {response.status_code}, {response.text}\")\n raise Exception(\"Internal Server Error\")\n\n except requests.exceptions.RequestException as error:\n print(\"Request Error:\", error)\n raise Exception(\n \"Invalid request. Please check your request parameters.\")" }, { "identifier": "ARGUMENTS", "path": "constants/cli.py", "snippet": "ARGUMENTS = [\n {\n \"name\": \"k_value\",\n \"nickname\": \"k\",\n \"help_text\": \"Number of chunks to return\",\n \"type\": int,\n \"default\": 15,\n },\n {\n \"name\": \"libraries\",\n \"nickname\": \"l\",\n \"help_text\": \"Limit your chat to a list of libraries. Usage: -l library1 library2 library3\",\n \"type\": list,\n },\n {\n \"name\": \"model\",\n \"nickname\": \"m\",\n \"help_text\": \"Specify the model. Default: gpt-4\",\n \"type\": str,\n \"default\": \"gpt-4\"\n },\n {\n \"name\": \"cite_sources\",\n \"nickname\": \"c\",\n \"help_text\": \"Determines whether or not the AI model cites its sources\",\n \"type\": bool,\n \"default\": True,\n },\n {\n \"name\": \"local\",\n \"nickname\": \"n\",\n \"help_text\": \"Uses LMStudio for local models\",\n \"type\": bool,\n \"default\": False,\n },\n {\n \"name\": \"context_window\",\n \"nickname\": \"w\",\n \"help_text\": \"Context window (if using local models)\",\n \"type\": int,\n \"default\": 3000,\n },\n]" }, { "identifier": "LIBRARIES", "path": "constants/cli.py", "snippet": "LIBRARIES = [\n \"python\",\n \"boto3\",\n \"urllib3\",\n \"botocore\",\n \"setuptools\",\n \"requests\",\n \"typing-extensions\",\n \"certifi\",\n \"charset-normalizer\",\n \"wheel\",\n \"cryptography\",\n \"python-dateutil\",\n \"idna\",\n \"pyyaml\",\n \"google-api-core\",\n \"six\",\n \"pytz\",\n \"numpy\",\n \"importlib-metadata\",\n \"pip\",\n \"packaging\",\n \"zipp\",\n \"awscli\",\n \"aiobotocore\",\n \"protobuf\",\n \"click\",\n \"pandas\",\n \"pyasn1\",\n \"rsa\",\n \"fsspec\",\n \"pyjwt\",\n \"jmespath\",\n \"markupsafe\",\n \"s3fs\",\n \"attrs\",\n \"cffi\",\n \"psutil\",\n \"lxml\",\n \"pydantic\",\n \"colorama\",\n \"platformdirs\",\n \"googleapis-common-protos\",\n \"pycparser\",\n \"google-auth\",\n \"pyopenssl\",\n \"virtualenv\",\n \"cachetools\",\n \"werkzeug\",\n \"jinja2\",\n \"jsonschema\",\n \"filelock\",\n \"flask\",\n \"sqlalchemy\",\n \"pyparsing\",\n \"docutils\",\n \"async-timeout\",\n \"tzlocal\",\n \"oauthlib\",\n \"pluggy\",\n \"tomli\",\n \"aiohttp\",\n \"grpcio\",\n \"requests-oauthlib\",\n \"pyarrow\",\n \"pytest\",\n \"wrapt\",\n \"tqdm\",\n \"soupsieve\",\n \"dnspython\",\n \"isodate\",\n \"azure-core\",\n \"frozenlist\",\n \"coverage\",\n \"pygments\",\n \"websocket-client\",\n \"beautifulsoup4\",\n \"pillow\",\n \"greenlet\",\n \"importlib-resources\",\n \"distlib\",\n \"yarl\",\n \"multidict\",\n \"scipy\",\n \"decorator\",\n \"aiofiles\",\n \"et-xmlfile\",\n \"openpyxl\",\n \"google-cloud-storage\",\n \"google-cloud-core\",\n \"httptools\",\n \"chardet\",\n \"iniconfig\",\n \"asn1crypto\",\n \"tomlkit\",\n \"tabulate\",\n \"more-itertools\",\n \"requests-toolbelt\",\n \"google-resumable-media\",\n \"paramiko\",\n \"aioconsole\",\n \"deprecated\",\n \"gitpython\",\n \"pynacl\",\n \"google-api-python-client\",\n \"pymysql\",\n \"psycopg2\",\n \"rpds-py\",\n \"proto-plus\",\n \"anyio\",\n \"itsdangerous\",\n \"msal\",\n \"referencing\",\n \"azure-storage-blob\",\n 
\"jsonschema-specifications\",\n \"bcrypt\",\n \"pathspec\",\n \"scikit-learn\",\n \"smmap\",\n \"msgpack\",\n \"matplotlib\",\n \"poetry-core\",\n \"keyring\",\n \"joblib\",\n \"regex\",\n \"mypy-extensions\",\n \"wcwidth\",\n \"docker\",\n \"sniffio\",\n \"google-auth-oauthlib\",\n \"kiwisolver\",\n \"portalocker\",\n \"pexpect\",\n \"ptyprocess\",\n \"jaraco-classes\",\n \"dill\",\n \"pyrsistent\",\n \"ruamel-yaml\",\n \"gitdb\",\n \"pycryptodomex\",\n \"sqlparse\",\n \"msrest\",\n \"google-crc32c\",\n \"sagemaker\",\n \"tenacity\",\n \"prompt-toolkit\",\n \"google-cloud-bigquery\",\n \"tzdata\",\n \"snowflake-connector-python\",\n \"gunicorn\",\n \"cython\",\n \"py4j\",\n \"py\",\n \"markdown\",\n \"azure-identity\",\n \"httplib2\",\n \"future\",\n \"fonttools\",\n \"alembic\",\n \"markdown-it-py\",\n \"cachecontrol\",\n \"awswrangler\",\n \"rich\",\n \"msal-extensions\",\n \"tornado\",\n \"threadpoolctl\",\n \"jedi\",\n \"marshmallow\",\n \"google-auth-httplib2\",\n \"traitlets\",\n \"cloudpickle\",\n \"shellingham\",\n \"redis\",\n \"pycodestyle\",\n \"backoff\",\n \"python-dotenv\",\n \"scramp\",\n \"toml\",\n \"h11\",\n \"pytest-cov\",\n \"termcolor\",\n \"trove-classifiers\",\n \"annotated-types\",\n \"uritemplate\",\n \"ipython\",\n \"pyzmq\",\n \"networkx\",\n \"xmltodict\",\n \"uvicorn\",\n \"pyspark\",\n \"pg8000\",\n \"mccabe\",\n \"ply\",\n \"prometheus-client\",\n \"prometheus-python\",\n \"redshift-connector\",\n \"oscrypto\",\n \"dulwich\",\n \"webencodings\",\n \"pyodbc\",\n \"pycryptodome\",\n \"httpx\",\n \"sortedcontainers\",\n \"httpcore\",\n \"jeepney\",\n \"mako\",\n \"babel\",\n \"poetry\",\n \"secretstorage\",\n \"defusedxml\",\n \"isort\",\n \"jsonpointer\",\n \"blinker\",\n \"black\",\n \"jupyter-client\",\n \"typing-inspect\",\n \"jupyter-core\",\n \"pymongo\",\n \"mdit-py-plugins\",\n \"datadog\",\n \"contourpy\",\n \"adal\",\n \"pkginfo\",\n \"parso\",\n \"tensorboard\",\n \"toolz\",\n \"pyflakes\",\n \"absl-py\",\n \"sentry-sdk\",\n \"xlrd\",\n \"requests-aws4auth\",\n \"flake8\",\n \"jsonpath-ng\",\n \"python-json-logger\",\n \"nbconvert\",\n \"pickleshare\",\n \"build\",\n \"mdurl\",\n \"backcall\",\n \"fastapi\",\n \"rapidfuzz\",\n \"argcomplete\",\n \"python-utils\",\n \"transformers\",\n \"matplotlib-inline\",\n \"setuptools-scm\",\n \"nbformat\",\n \"ipykernel\",\n \"databricks-cli\",\n \"notebook\",\n \"fastjsonschema\",\n \"jupyter-server\",\n \"mistune\",\n \"huggingface-hub\",\n \"kubernetes\",\n \"debugpy\",\n \"starlette\",\n \"arrow\",\n \"asttokens\",\n \"progressbar2\",\n \"tensorflow\",\n \"google-cloud-pubsub\",\n \"websockets\",\n \"astroid\",\n \"jsonpatch\",\n \"asynctest\",\n \"aioitertools\",\n \"imageio\",\n \"simplejson\",\n \"appdirs\",\n \"pyproject-hooks\",\n \"pylint\",\n \"pbr\",\n \"lazy-object-proxy\",\n \"multiprocess\",\n \"smart-open\",\n \"altair\",\n \"h5py\",\n \"asgiref\",\n \"backports-zoneinfo\",\n \"tinycss2\",\n \"entrypoints\",\n \"bleach\",\n \"oauth2client\",\n \"llvmlite\",\n \"numba\",\n \"cattrs\",\n \"crashtest\",\n \"mlflow\",\n \"send2trash\",\n \"shapely\",\n \"elasticsearch\",\n \"comm\",\n \"cleo\",\n \"orjson\",\n \"pendulum\",\n \"pytest-runner\",\n \"nbclient\",\n \"aenum\",\n \"pygithub\",\n \"identify\",\n \"msrestazure\",\n \"nodeenv\",\n \"mypy\",\n \"flatbuffers\",\n \"great-expectations\",\n \"mock\",\n \"jupyterlab-server\",\n \"zope-interface\",\n \"pytzdata\",\n \"loguru\",\n \"argon2-cffi\",\n \"tokenizers\",\n \"typeguard\",\n \"overrides\",\n \"tox\",\n \"requests-file\",\n 
\"humanfriendly\",\n \"json5\",\n \"xlsxwriter\",\n \"pysocks\",\n \"google-pasta\",\n \"cfgv\",\n \"pyathena\",\n \"gast\",\n \"azure-storage-file-datalake\",\n \"ipywidgets\",\n \"rfc3339-validator\",\n \"executing\",\n \"jupyterlab\",\n \"pre-commit\",\n \"django\",\n \"querystring-parser\",\n \"contextlib2\",\n \"cached-property\",\n \"installer\",\n \"deepdiff\",\n \"pure-eval\",\n \"tensorflow-serving-api\",\n \"nltk\",\n \"semver\",\n \"retry\",\n \"hvac\",\n \"pipenv\",\n \"uri-template\",\n \"torch\",\n \"execnet\",\n \"html5lib\",\n \"typer\",\n \"croniter\",\n \"lockfile\",\n \"slack-sdk\",\n \"watchdog\",\n \"dataclasses\",\n \"gremlinpython\",\n \"types-pyyaml\",\n \"tensorflow-io-gcs-filesystem\",\n \"setproctitle\",\n \"azure-mgmt-core\",\n \"responses\",\n \"sphinx\",\n \"statsmodels\",\n \"text-unidecode\",\n \"dataclasses-json\",\n \"pandocfilters\",\n \"pytest-xdist\",\n \"async-lru\",\n \"click-plugins\",\n \"opentelemetry-api\",\n \"selenium\",\n \"safetensors\",\n \"opencv-python\",\n \"python-slugify\",\n \"xgboost\",\n \"distro\",\n \"plotly\",\n \"sentencepiece\",\n \"webcolors\",\n \"types-requests\",\n \"rfc3986\",\n \"terminado\",\n \"jupyter-lsp\",\n \"rfc3986-validator\",\n \"configparser\",\n \"argon2-cffi-bindings\",\n \"cmake\",\n \"fastavro\",\n \"docopt\",\n \"unidecode\",\n \"retrying\",\n \"types-urllib3\",\n \"apache-airflow\",\n \"pytest-mock\",\n \"fqdn\",\n \"isoduration\",\n \"tblib\",\n \"prettytable\",\n \"semantic-version\",\n \"sympy\",\n \"seaborn\",\n \"confluent-kafka\",\n \"azure-keyvault-secrets\",\n \"opt-einsum\",\n \"faker\",\n \"jsonpickle\",\n \"mpmath\",\n \"patsy\",\n \"azure-mgmt-resource\",\n \"libclang\",\n \"opencensus\",\n \"antlr4-python3-runtime\",\n \"pysftp\",\n \"ordered-set\",\n \"pymssql\",\n \"db-dtypes\",\n \"astunparse\",\n \"inflection\",\n \"gcsfs\",\n \"thrift\",\n \"parsedatetime\",\n \"dask\",\n \"deprecation\",\n \"scikit-image\",\n \"azure-datalake-store\",\n \"moto\",\n \"zeep\",\n \"makefun\",\n \"pyhcl\",\n \"boto\",\n \"libcst\",\n \"graphviz\",\n \"stevedore\",\n \"gspread\",\n \"snowballstemmer\",\n \"ujson\",\n \"zope-event\",\n \"gevent\",\n \"pyproj\",\n \"checkov\",\n \"python-gnupg\",\n \"pathos\",\n \"trio\",\n \"trio-websocket\",\n \"azure-eventhub\",\n \"typed-ast\",\n \"kombu\",\n \"shap\",\n \"pox\",\n \"ppft\",\n \"datasets\",\n \"apscheduler\",\n \"torchvision\",\n \"click-man\",\n \"accelerate\",\n \"coloredlogs\",\n \"xxhash\",\n \"brotli\",\n \"mypy-boto3-rds\",\n \"docstring-parser\",\n \"applicationinsights\",\n \"apache-beam\",\n \"structlog\",\n \"tldextract\",\n \"lightgbm\",\n \"email-validator\",\n \"wandb\",\n \"cligj\",\n \"kafka-python\",\n \"pybind11\",\n \"fire\",\n \"celery\",\n \"wsproto\",\n \"pywavelets\",\n \"numexpr\",\n \"authlib\",\n \"datetime\",\n \"colorlog\",\n \"pathlib2\",\n \"uamqp\",\n \"texttable\",\n \"pytest-asyncio\",\n \"google-cloud-logging\",\n \"azure-cosmos\",\n \"delta-spark\",\n \"ecdsa\",\n \"nvidia-cudnn-cu11\",\n \"enum34\",\n \"flask-cors\",\n \"slicer\",\n \"spacy\",\n \"fiona\",\n \"python-jose\",\n \"watchtower\",\n \"unicodecsv\",\n \"imagesize\",\n \"schema\",\n \"alabaster\",\n \"kfp\",\n \"geopandas\",\n \"marshmallow-enum\",\n \"apache-airflow-providers-common-sql\",\n \"pyfunctional\",\n \"dbt-core\",\n \"validators\",\n \"keras-preprocessing\",\n \"holidays\",\n \"python-daemon\",\n \"readme-renderer\",\n \"djangorestframework\",\n \"pandas-gbq\",\n \"azure-storage-queue\",\n \"azure-servicebus\",\n \"hypothesis\",\n \"tifffile\",\n 
\"sshtunnel\",\n \"graphframes\",\n \"lz4\",\n \"kfp-server-api\",\n \"python-magic\",\n \"invoke\",\n \"avro-python3\",\n \"parse\",\n \"kfp-pipeline-spec\",\n \"freezegun\",\n \"constructs\",\n \"outcome\",\n \"python-multipart\",\n \"billiard\",\n \"monotonic\",\n \"pip-tools\",\n \"vine\",\n \"fasteners\",\n \"ddtrace\",\n \"databricks-sql-connector\",\n \"pycountry\",\n \"azure-keyvault-keys\",\n \"sendgrid\",\n \"click-repl\",\n \"srsly\",\n \"pika\",\n \"chex\",\n \"thinc\",\n \"ijson\",\n \"jira\",\n \"docker-pycreds\",\n \"hpack\",\n \"opencv-python-headless\",\n \"blis\",\n \"flask-sqlalchemy\",\n \"fuzzywuzzy\",\n \"xlwt\",\n \"imbalanced-learn\",\n \"qtconsole\",\n \"pydata-google-auth\",\n \"h2\",\n \"pyproject-api\",\n \"sh\",\n \"lit\",\n \"hyperframe\",\n \"stringcase\",\n \"astor\",\n \"langchain-guides\",\n \"langchain\",\n \"wasabi\",\n \"pytest-metadata\",\n \"bitarray\",\n \"pathtools\",\n \"catalogue\",\n \"nose\",\n \"yapf\",\n \"distributed\",\n \"amqp\",\n \"pathy\",\n \"qtpy\",\n \"types-pytz\",\n \"boto3-stubs\",\n \"triton\",\n \"office365-rest-python-client\",\n \"hatchling\",\n \"jupyter-console\",\n \"slackclient\",\n \"atomicwrites\",\n \"starkbank-ecdsa\",\n \"omegaconf\",\n \"editables\",\n \"uvloop\",\n \"humanize\",\n \"knack\",\n \"botocore-stubs\",\n \"iso8601\",\n \"smdebug-rulesconfig\",\n \"crcmod\",\n \"torchmetrics\",\n \"fastparquet\",\n \"python-levenshtein\",\n \"pytimeparse\",\n \"mypy-boto3-s3\",\n \"einops\",\n \"pywin32\",\n \"jpype1\",\n \"pydeequ\",\n \"cog\",\n \"azure-cli\",\n \"pymeeus\",\n \"types-six\",\n \"murmurhash\",\n \"ansible\",\n \"pyspnego\",\n \"inflect\",\n \"phonenumbers\",\n \"flask-wtf\",\n \"cymem\",\n \"preshed\",\n \"cdk-nag\",\n \"aws-requests-auth\",\n \"google-cloud-audit-log\",\n \"ua-parser\",\n \"jsondiff\",\n \"yamllint\",\n \"nbclassic\",\n \"cerberus\",\n \"lazy-loader\",\n \"dacite\",\n \"statsd\",\n \"cssselect\",\n \"dpath\",\n \"apispec\",\n \"gensim\",\n \"django-cors-headers\",\n \"ruff\",\n \"gradio\",\n \"convertdate\",\n \"scp\",\n \"geopy\",\n \"sqlalchemy-utils\",\n \"azure-data-tables\",\n \"pypdf2\",\n \"partd\",\n \"graphql-core\",\n \"python-gitlab\",\n \"ninja\",\n \"ratelimit\",\n \"junit-xml\",\n \"levenshtein\",\n \"fabric\",\n \"pydot\",\n \"azure-storage-file-share\",\n \"pytorch-lightning\",\n \"watchfiles\",\n \"types-setuptools\",\n \"requests-mock\",\n \"strip-hints\",\n \"keras-applications\",\n \"pyotp\",\n \"mashumaro\",\n \"apache-airflow-providers-http\",\n \"ipaddress\",\n \"timm\",\n \"click-didyoumean\",\n \"bytecode\",\n \"parameterized\",\n \"netaddr\",\n \"flask-appbuilder\",\n \"pyperclip\",\n \"openapi-spec-validator\",\n \"onnx\",\n \"marshmallow-sqlalchemy\",\n \"locket\",\n \"lark\",\n \"mysqlclient\",\n \"confection\",\n \"pytest-html\",\n \"azure-cosmosdb-table\",\n \"agate\",\n \"geographiclib\",\n \"types-paramiko\",\n \"pytest-rerunfailures\",\n \"pyserial\",\n \"spacy-loggers\",\n \"flask-login\",\n \"flask-jwt-extended\",\n \"azure-devops\",\n \"xarray\",\n \"spark-nlp\",\n \"dateparser\",\n \"onnxruntime\",\n \"twisted\",\n \"lightning-utilities\",\n \"wtforms\",\n \"jaydebeapi\",\n \"bokeh\",\n \"natsort\",\n \"google-cloud-bigtable\",\n \"grpcio-health-checking\",\n \"tensorflow-text\",\n \"twine\",\n \"commonmark\",\n \"grpcio-reflection\",\n \"flask-caching\",\n \"cron-descriptor\",\n \"pyaml\",\n \"geoip2\",\n \"nh3\",\n \"autopep8\",\n \"python-editor\",\n \"logbook\",\n \"ftfy\",\n \"cachelib\",\n \"datadog-api-client\",\n \"jupyter\",\n \"hologram\",\n 
\"protobuf3-to-dict\",\n \"ndg-httpsclient\",\n \"promise\",\n \"azureml-core\",\n \"pydub\",\n \"jax\",\n \"flit-core\",\n \"zstandard\",\n \"cssselect2\",\n \"minimal-snowplow-tracker\",\n \"dbt-extractor\",\n \"connexion\",\n \"azure-keyvault-certificates\",\n \"configargparse\",\n \"aniso8601\",\n \"cairocffi\",\n \"hyperlink\",\n \"cramjam\",\n \"elasticsearch-dsl\",\n \"mypy-boto3-redshift-data\",\n \"h3\",\n \"cairosvg\",\n \"maxminddb\",\n \"pytz-deprecation-shim\",\n \"reportlab\",\n \"langcodes\",\n \"pytest-forked\",\n \"pymupdf\",\n \"ansible-core\",\n \"cloudevents\",\n \"leather\",\n \"ddsketch\",\n \"jaxlib\",\n \"oldest-supported-numpy\",\n \"tiktoken\",\n \"supervisor\",\n \"diskcache\",\n \"functions-framework\",\n \"hdfs\",\n \"apache-airflow-providers-ssh\",\n \"gradio-client\",\n \"azure-multiapi-storage\",\n \"funcsigs\",\n \"azure-kusto-data\",\n \"envier\",\n \"pyhive\",\n \"types-protobuf\",\n \"django-filter\",\n \"elastic-transport\",\n \"parse-type\",\n \"types-python-dateutil\",\n \"boltons\",\n \"python-docx\",\n \"twilio\",\n \"twilio-python\",\n \"pgpy\",\n \"korean-lunar-calendar\",\n \"azure-eventgrid\",\n \"async-generator\",\n \"globus-sdk\",\n \"apache-airflow-providers-imap\",\n \"sentence-transformers\",\n \"mkdocs-material\",\n \"aws-xray-sdk\",\n \"resolvelib\",\n \"linkify-it-py\",\n \"setuptools-rust\",\n \"google\",\n \"terminaltables\",\n \"keystoneauth1\",\n \"apache-airflow-providers-ftp\",\n \"javaproperties\",\n \"sqlalchemy-redshift\",\n \"jdcal\",\n \"pep517\",\n \"incremental\",\n \"limits\",\n \"unittest-xml-reporting\",\n \"frozendict\",\n \"service-identity\",\n \"factory-boy\",\n \"ml-dtypes\",\n \"addict\",\n \"uc-micro-py\",\n \"shortuuid\",\n \"pypandoc\",\n \"blessed\",\n \"cx-oracle\",\n \"requests-ntlm\",\n \"django-extensions\",\n \"apache-airflow-providers-amazon\",\n \"python-keystoneclient\",\n \"bracex\",\n \"cmdstanpy\",\n \"apache-airflow-providers-sqlite\",\n \"cookiecutter\",\n \"types-cryptography\",\n \"flask-session\",\n \"timezonefinder\",\n \"magicattr\",\n \"pymsteams\",\n \"pylint-plugin-utils\",\n \"voluptuous\",\n \"langsmith\",\n \"cinemagoer\",\n \"passlib\",\n \"imdbpy\",\n \"emoji\",\n \"databricks-api\",\n \"configobj\",\n \"bandit\",\n \"ultralytics\",\n \"w3lib\",\n \"dirac\",\n \"backports-functools-lru-cache\",\n \"tableauserverclient\",\n \"automat\",\n \"pypika\",\n \"pydash\",\n \"py-cpuinfo\",\n \"mmh3\",\n \"tokenize-rt\",\n \"python-swiftclient\",\n \"tensorflow-hub\",\n \"librosa\",\n \"webdriver-manager\",\n \"constantly\",\n \"user-agents\",\n \"injector\",\n \"youtube-dl\",\n \"pdfminer-six\",\n \"markdown2\",\n \"ffmpy\",\n \"mergedeep\",\n \"netifaces\",\n \"databricks-sdk\",\n \"azure-keyvault-administration\",\n \"ephem\",\n \"flax\",\n \"urllib3-secure-extra\",\n \"looker-sdk\",\n \"kornia\",\n \"python3-openid\",\n \"userpath\",\n \"polars\",\n \"tensorboardx\",\n \"openapi-schema-validator\",\n \"jellyfish\",\n \"ray\",\n \"django-storages\",\n \"asyncpg\",\n \"dynamodb-json\",\n \"pycocotools\",\n \"lunarcalendar\",\n \"types-redis\",\n \"dm-tree\",\n \"flask-limiter\",\n \"scapy\",\n \"sacremoses\",\n \"hiredis\",\n \"netcdf4\",\n \"pyhocon\",\n \"cmaes\",\n \"feedparser\",\n \"firebase-admin\",\n \"yacs\",\n \"prison\",\n \"pytest-localserver\",\n \"polling2\",\n \"flask-babel\",\n \"influxdb\",\n \"binaryornot\",\n \"psycopg3\",\n \"sarif-om\",\n \"jschema-to-python\",\n \"cfn-flip\",\n \"google-apitools\",\n \"ipdb\",\n \"pyrfc3339\",\n \"filterpy\",\n \"py-spy\",\n \"wcmatch\",\n 
\"launchdarkly-server-sdk\",\n \"pyelftools\",\n \"logging-azure-rest\",\n \"python-jenkins\",\n \"apache-airflow-providers-cncf-kubernetes\",\n \"google-ads\",\n \"clickclick\",\n \"streamlit\",\n \"pylint-django\",\n \"yq\",\n \"findspark\",\n \"pycares\",\n \"mkdocs\",\n \"pytimeparse2\",\n \"ldap3\",\n \"pyee\",\n \"pydocstyle\",\n \"catboost\",\n \"sqlalchemy-jsonfield\",\n \"optuna\",\n \"aws-lambda-powertools\",\n \"lru-dict\",\n \"rasterio\",\n \"cartoframes\",\n \"carto\",\n \"aiodns\",\n \"pyrestcli\",\n \"opentracing\",\n \"tensorflow-datasets\",\n \"apache-airflow-providers-google\",\n \"jsonlines\",\n \"azure\",\n \"backports-weakref\",\n \"diff-cover\",\n \"cftime\",\n \"azure-kusto-ingest\",\n \"qrcode\",\n \"redis-py-cluster\",\n \"diffusers\",\n \"grpclib\",\n \"pypdf\",\n \"thrift-sasl\",\n \"django-debug-toolbar\",\n \"dynaconf\",\n \"django-redis\",\n \"salesforce-bulk\",\n \"kazoo\",\n \"configupdater\",\n \"comtypes\",\n \"langdetect\",\n \"hydra-core\",\n \"pytest-django\",\n \"pywin32-ctypes\",\n \"pyminizip\",\n \"pathvalidate\",\n \"google-re2\",\n \"idna-ssl\",\n \"dagster-pandas\",\n \"toposort\",\n \"expiringdict\",\n \"rdflib\",\n \"etils\",\n \"rich-argparse\",\n \"xyzservices\",\n \"bottle\",\n \"oslo-utils\",\n \"prophet\",\n \"pdfplumber\",\n \"azure-mgmt-subscription\",\n \"parsl\",\n \"jsii\",\n \"click-option-group\",\n \"analytics-python\",\n \"home-run\",\n \"funcx\",\n \"funcx-common\",\n \"lmdb\",\n \"zict\",\n \"multi-key-dict\",\n \"hatch-fancy-pypi-readme\",\n \"googlemaps\",\n \"pyudev\",\n \"atlassian-python-api\",\n \"dohq-artifactory\",\n \"oslo-i18n\",\n \"whitenoise\",\n \"aiosqlite\",\n \"python-engineio\",\n \"enum-compat\",\n \"affine\",\n \"fs\",\n \"flake8-bugbear\",\n \"hyperopt\",\n \"multipledispatch\",\n \"oslo-serialization\",\n \"pygeohash\",\n \"somnium\",\n \"kaleido\",\n \"python-snappy\",\n \"python-pptx\",\n \"gql\",\n \"pymdown-extensions\",\n \"wexpect\",\n \"types-pyopenssl\",\n \"foundationdb\",\n \"jsonschema-spec\",\n \"iopath\",\n \"snuggs\",\n \"strict-rfc3339\",\n \"tablib\",\n \"orderedmultidict\",\n \"sqlglot\",\n \"fakeredis\",\n \"pystan\",\n \"python-socketio\",\n \"robotframework\",\n \"pkgconfig\",\n \"pycairo\",\n \"python-consul\",\n \"curlify\",\n \"types-toml\",\n \"backports-tempfile\",\n \"multimethod\",\n \"pynamodb\",\n \"docker-compose\",\n \"munch\",\n \"torchaudio\",\n \"elementpath\",\n \"mypy-boto3-lambda\",\n \"python-decouple\",\n \"mypy-boto3-dynamodb\",\n \"pylev\",\n \"pmdarima\",\n \"drf-yasg\",\n \"path\",\n \"pyxlsb\",\n \"pandasql\",\n \"pipdeptree\",\n \"debtcollector\",\n \"nvidia-ml-py\",\n \"pyinstaller-hooks-contrib\",\n \"dvclive\",\n \"koalas\",\n \"arviz\",\n \"coreapi\",\n \"sqlalchemy-bigquery\",\n \"pyquery\",\n \"webob\",\n \"faiss-cpu\",\n \"flower\",\n \"cloudformation-cli\",\n \"azureml-dataset-runtime\",\n \"azure-mgmt\",\n \"cloudformation-cli-java-plugin\",\n \"pyinstaller\",\n \"python-box\",\n \"pympler\",\n \"mypy-boto3-secretsmanager\",\n \"marshmallow-oneofschema\",\n \"schedule\",\n \"resampy\",\n \"bitstring\",\n \"timeout-decorator\",\n \"furl\",\n \"bidict\",\n \"setuptools-git\",\n \"jsonmerge\",\n \"htmlmin\",\n \"plumbum\",\n \"gdown\",\n \"evergreen-py\",\n \"tableauhyperapi\",\n \"xformers\",\n \"yt-dlp\",\n \"waitress\",\n \"mypy-boto3-cloudformation\",\n \"tld\",\n \"pipx\",\n \"fake-useragent\",\n \"junitparser\",\n \"pylint-flask\",\n \"jaraco-functools\",\n \"geomet\",\n \"yappi\",\n \"flask-openid\",\n \"apache-airflow-providers-snowflake\",\n 
\"ciso8601\",\n \"paho-mqtt\",\n \"aiohttp-retry\",\n \"smbprotocol\",\n \"mypy-protobuf\",\n \"msgpack-python\",\n \"dockerpty\",\n \"cssutils\",\n \"djangorestframework-simplejwt\",\n \"wordcloud\",\n \"pytest-env\",\n \"django-environ\",\n \"s3cmd\",\n \"graphene\",\n \"soundfile\",\n \"html2text\",\n \"dagster-dbt\",\n \"apache-airflow-providers-databricks\",\n \"python-nvd3\",\n \"pygobject\",\n \"azureml-sdk\",\n \"click-default-group\",\n \"azureml-dataprep\",\n \"pygit2\",\n \"boto3-type-annotations\",\n \"imagehash\",\n \"ec2-metadata\",\n \"requests-futures\",\n \"rx\",\n \"geventhttpclient\",\n \"wget\",\n \"xmlschema\",\n \"python-rapidjson\",\n \"playwright\",\n \"flatten-json\",\n \"collections-extended\",\n \"myst-parser\",\n \"flask-restful\",\n \"facebook-business\",\n \"pdpyras\",\n \"python-crfsuite\",\n \"pydeck\",\n \"dash-core-components\",\n \"publication\",\n \"zthreading\",\n \"cheroot\",\n \"minio\",\n \"uwsgi\",\n \"portpicker\",\n \"simplegeneric\",\n \"python-crontab\",\n \"basicsr\",\n \"facexlib\",\n \"testpath\",\n \"json-log-formatter\",\n \"ghp-import\",\n \"sseclient-py\",\n \"ansi2html\",\n \"jproperties\",\n \"django-timezone-field\",\n \"duckdb\",\n \"pygsheets\",\n \"pyzstd\",\n \"opencv-contrib-python\",\n \"pyyaml-env-tag\",\n \"pyaes\",\n \"pooch\",\n \"funcy\",\n \"appnope\",\n \"cerberus-python-client\",\n \"realesrgan\",\n \"readchar\",\n \"cassandra-driver\",\n \"requests-unixsocket\",\n \"pyproject-metadata\",\n \"dictdiffer\",\n \"pypng\",\n \"ffmpeg-python\",\n \"locust\",\n \"pymc\",\n \"modelx\",\n \"ffn\",\n \"finance-py\",\n \"gs-quant\",\n \"tf-quant-finance\",\n \"finta\",\n \"qstrader\",\n \"blankly\",\n \"ta-lib-python\",\n \"zipline\",\n \"bt\",\n \"backtrader\",\n \"pyalgotrade\",\n \"pandas-ta\",\n \"ta\",\n \"finmarket-py\",\n \"zvt\",\n \"py-portfolio-opt\",\n \"eiten\",\n \"backtesting-py\",\n \"quantstats\",\n \"qtpylib\",\n \"freqtrade\",\n \"qlib\",\n \"jesse\",\n \"finrl\",\n \"bulbea\",\n \"octobot\",\n \"tda-api\",\n \"vectorbt\",\n \"lean\",\n \"pybroker\",\n \"pyfolio\",\n \"empyrical\",\n \"finquant\",\n \"riskfolio-lib\",\n \"alphalens\",\n \"arch\",\n \"pyflux\",\n \"tsfresh\",\n \"gluonts\",\n \"yfinance\",\n \"alpha-vantage\",\n \"pandas-datareader\",\n \"yahoo-finance\",\n \"findatapy\",\n \"wallstreet\",\n \"alpaca-trade-api-python\",\n \"investpy\",\n \"xlwings\",\n \"dtale\",\n \"mplfinance\",\n \"keras\",\n \"opensearch-py\",\n \"openai\",\n \"dash\",\n \"stripe\",\n]" }, { "identifier": "OPENAI_MODELS", "path": "constants/cli.py", "snippet": "OPENAI_MODELS = [\n \"gpt-4-1106-preview\",\n \"gpt-4\",\n \"gpt-3.5-turbo\",\n \"gpt-3.5-turbo-16k\",\n]" }, { "identifier": "MODELS_TO_TOKENS", "path": "constants/ai.py", "snippet": "MODELS_TO_TOKENS = {\n \"gpt-4\": 8192,\n \"gpt-4-1106-preview\": 128000,\n \"gpt-4-32k\": 32768,\n \"gpt-3.5-turbo\": 4097,\n \"gpt-3.5-turbo-16k\": 16385,\n}" } ]
import os import openai import sys import argparse import traceback from getpass import getpass from rich import print as rprint from utils.utils import print_markdown, print_exception, extract_code_blocks, print_help from utils.stream import TextStream from utils.ai import ( retrieve_context, construct_prompt, get_remote_chat_response, get_other_chat_response, ) from constants.cli import ARGUMENTS, LIBRARIES, OPENAI_MODELS from constants.ai import MODELS_TO_TOKENS
12261
# pylint: disable=E0401 # pylint: disable=W0122 # pylint: disable=W0718 def main(): parser = argparse.ArgumentParser(description="Fleet Data Retriever", add_help=False) parser.add_argument("help", nargs="?", default=argparse.SUPPRESS) # Add arguments for arg in ARGUMENTS: if arg["type"] == bool: default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action="store_true", default=default, ) elif arg["type"] == list: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=str, nargs="+", choices=choices, default=default, ) else: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"], choices=choices, default=default, ) # Hit the retrieve endpoint args = parser.parse_args() k = args.k_value model = args.model cite_sources = args.cite_sources filters = {} if getattr(args, "help", None) is not None: print_help() return # If library specified, match library name to uuid if args.libraries: for library in args.libraries: if library not in LIBRARIES: rprint( "Library not found. Please refer to the list of available libraries." ) return filters["library_name"] = args.libraries # Get context window
# pylint: disable=E0401 # pylint: disable=W0122 # pylint: disable=W0718 def main(): parser = argparse.ArgumentParser(description="Fleet Data Retriever", add_help=False) parser.add_argument("help", nargs="?", default=argparse.SUPPRESS) # Add arguments for arg in ARGUMENTS: if arg["type"] == bool: default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action="store_true", default=default, ) elif arg["type"] == list: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=str, nargs="+", choices=choices, default=default, ) else: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"], choices=choices, default=default, ) # Hit the retrieve endpoint args = parser.parse_args() k = args.k_value model = args.model cite_sources = args.cite_sources filters = {} if getattr(args, "help", None) is not None: print_help() return # If library specified, match library name to uuid if args.libraries: for library in args.libraries: if library not in LIBRARIES: rprint( "Library not found. Please refer to the list of available libraries." ) return filters["library_name"] = args.libraries # Get context window
if model in OPENAI_MODELS:
11
2023-11-02 07:07:13+00:00
16k
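For orientation, a minimal sketch of how the cropped main() above could continue from the recorded next_line. This is an assumption for illustration only, not the gold completion stored in this row; it uses only identifiers already present in the row (OPENAI_MODELS, MODELS_TO_TOKENS, the context_window argument, and the helpers imported in the import_statement field):

    # Hypothetical continuation sketch, not the repository's actual code.
    if model in OPENAI_MODELS:
        max_tokens = MODELS_TO_TOKENS[model]   # e.g. 8192 for "gpt-4"
    else:
        max_tokens = args.context_window       # local/OpenRouter models fall back to the -w flag
    # Retrieval and prompting would then go through the imported helpers:
    # retrieve_context(...) -> construct_prompt(...) -> get_remote_chat_response(...)
    # or get_other_chat_response(...) for non-OpenAI backends.
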
ForceFledgling/proxyhub
proxyhub/api.py
[ { "identifier": "Checker", "path": "proxyhub/checker.py", "snippet": "class Checker:\n \"\"\"Proxy checker.\"\"\"\n\n def __init__(\n self,\n judges,\n max_tries=3,\n timeout=8,\n verify_ssl=False,\n strict=False,\n dnsbl=None,\n real_ext_ip=None,\n types=None,\n post=False,\n loop=None,\n ):\n Judge.clear()\n self._judges = get_judges(judges, timeout, verify_ssl)\n self._method = 'POST' if post else 'GET'\n self._max_tries = max_tries\n self._real_ext_ip = real_ext_ip\n self._strict = strict\n self._dnsbl = dnsbl or []\n self._types = types or {}\n self._loop = loop or asyncio.get_event_loop()\n self._resolver = Resolver(loop=self._loop)\n\n self._req_http_proto = not types or bool(\n ('HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5') & types.keys()\n )\n self._req_https_proto = not types or bool(('HTTPS',) & types.keys())\n self._req_smtp_proto = not types or bool(('CONNECT:25',) & types.keys()) # noqa\n\n self._ngtrs = {proto for proto in types or NGTRS}\n\n async def check_judges(self):\n # TODO: need refactoring\n log.debug('Start check judges')\n stime = time.time()\n await asyncio.gather(\n *[j.check(real_ext_ip=self._real_ext_ip) for j in self._judges]\n )\n\n self._judges = [j for j in self._judges if j.is_working]\n log.debug(\n '%d judges added. Runtime: %.4f;' % (len(self._judges), time.time() - stime)\n )\n\n nojudges = []\n disable_protocols = []\n\n if len(Judge.available['HTTP']) == 0:\n nojudges.append('HTTP')\n disable_protocols.extend(['HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5'])\n self._req_http_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTP'].set()\n if len(Judge.available['HTTPS']) == 0:\n nojudges.append('HTTPS')\n disable_protocols.append('HTTPS')\n self._req_https_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTPS'].set()\n if len(Judge.available['SMTP']) == 0:\n # nojudges.append('SMTP')\n disable_protocols.append('SMTP')\n self._req_smtp_proto = False\n # for coroutines, which is already waiting\n Judge.ev['SMTP'].set()\n\n for proto in disable_protocols:\n if proto in self._ngtrs:\n self._ngtrs.remove(proto)\n\n if nojudges:\n warnings.warn(\n 'Not found judges for the {nojudges} protocol.\\n'\n 'Checking proxy on protocols {disp} is disabled.'.format(\n nojudges=nojudges, disp=disable_protocols\n ),\n UserWarning,\n )\n if self._judges:\n log.debug('Loaded: %d proxy judges' % len(set(self._judges)))\n else:\n RuntimeError('Not found judges')\n\n def _types_passed(self, proxy):\n if not self._types:\n return True\n for proto, lvl in proxy.types.copy().items():\n req_levels = self._types.get(proto)\n if not req_levels or (lvl in req_levels):\n if not self._strict:\n return True\n else:\n if self._strict:\n del proxy.types[proto]\n if self._strict and proxy.types:\n return True\n proxy.log('Protocol or the level of anonymity differs from the requested')\n return False\n\n async def _in_DNSBL(self, host):\n _host = '.'.join(reversed(host.split('.'))) # reverse address\n tasks = []\n for domain in self._dnsbl:\n query = '.'.join([_host, domain])\n tasks.append(self._resolver.resolve(query, logging=False))\n responses = await asyncio.gather(*tasks, return_exceptions=True)\n if any([r for r in responses if not isinstance(r, ResolveError)]):\n return True\n return False\n\n async def check(self, proxy):\n if self._dnsbl:\n if await self._in_DNSBL(proxy.host):\n proxy.log('Found in DNSBL')\n return False\n\n if self._req_http_proto:\n await Judge.ev['HTTP'].wait()\n if self._req_https_proto:\n await 
Judge.ev['HTTPS'].wait()\n if self._req_smtp_proto:\n await Judge.ev['SMTP'].wait()\n\n if proxy.expected_types:\n ngtrs = proxy.expected_types & self._ngtrs\n else:\n ngtrs = self._ngtrs\n\n results = []\n for proto in ngtrs:\n if proto == 'CONNECT:25':\n result = await self._check_conn_25(proxy, proto)\n else:\n result = await self._check(proxy, proto)\n results.append(result)\n\n proxy.is_working = True if any(results) else False\n\n if proxy.is_working and self._types_passed(proxy):\n return True\n return False\n\n async def _check_conn_25(self, proxy, proto):\n judge = Judge.get_random(proto)\n proxy.log('Selected judge: %s' % judge)\n result = False\n for attempt in range(self._max_tries):\n try:\n proxy.ngtr = proto\n await proxy.connect()\n await proxy.ngtr.negotiate(host=judge.host, ip=judge.ip)\n except ProxyTimeoutError:\n continue\n except (\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ):\n break\n else:\n proxy.types[proxy.ngtr.name] = None\n result = True\n break\n finally:\n proxy.close()\n return result\n\n async def _check(self, proxy, proto):\n judge = Judge.get_random(proto)\n proxy.log('Selected judge: %s' % judge)\n result = False\n for attempt in range(self._max_tries):\n try:\n proxy.ngtr = proto\n await proxy.connect()\n await proxy.ngtr.negotiate(host=judge.host, ip=judge.ip)\n headers, content, rv = await _send_test_request(\n self._method, proxy, judge\n )\n except ProxyTimeoutError:\n continue\n except (\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ):\n break\n else:\n content = _decompress_content(headers, content)\n result = _check_test_response(proxy, headers, content, rv)\n if result:\n if proxy.ngtr.check_anon_lvl:\n lvl = _get_anonymity_lvl(\n self._real_ext_ip, proxy, judge, content\n )\n else:\n lvl = None\n proxy.types[proxy.ngtr.name] = lvl\n break\n finally:\n proxy.close()\n return result" }, { "identifier": "ResolveError", "path": "proxyhub/errors.py", "snippet": "class ResolveError(Exception):\n pass" }, { "identifier": "PROVIDERS", "path": "proxyhub/providers.py", "snippet": "PROVIDERS = [\n Provider(\n url='http://www.proxylists.net/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 49\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=http',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # added by ZerGo0\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=socks4',\n proto=('SOCKS4'),\n ), # added by ZerGo0\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=socks5',\n proto=('SOCKS5'),\n ), # added by ZerGo0\n Provider(\n url='http://ipaddress.com/proxy-list/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 53\n Provider(\n url='https://www.sslproxies.org/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 100\n Provider(\n url='https://freshfreeproxylist.wordpress.com/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 50\n Provider(\n url='http://proxytime.ru/http',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 1400\n Provider(\n url='https://free-proxy-list.net/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 300\n Provider(\n url='https://us-proxy.org/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 200\n Provider(\n url='http://fineproxy.org/eng/fresh-proxies/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 
'CONNECT:25'),\n ), # 5500\n Provider(url='https://socks-proxy.net/', proto=('SOCKS4', 'SOCKS5')), # 80\n Provider(\n url='http://www.httptunnel.ge/ProxyListForFree.aspx',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 200\n Provider(\n url='http://cn-proxy.com/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 70\n Provider(\n url='https://hugeproxies.com/home/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 800\n Provider(\n url='http://proxy.rufey.ru/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 153\n Provider(\n url='https://geekelectronics.org/my-servisy/proxy',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 400\n Provider(\n url='http://pubproxy.com/api/proxy?limit=20&format=txt',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 20\n Proxy_list_org(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 140\n Xseo_in(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 240\n Spys_ru(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 660\n Proxylistplus_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 450\n Proxylist_me(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 2872\n Foxtools_ru(\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'), max_conn=1\n ), # noqa; 500\n Gatherproxy_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 3212\n Nntime_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 1050\n Blogspot_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 24800\n Gatherproxy_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 30\n Blogspot_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 1486\n Tools_rosinstrument_com(\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')\n ), # noqa; 4000\n Tools_rosinstrument_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 1800\n My_proxy_com(max_conn=2), # noqa; 1000\n Checkerproxy_net(), # noqa; 60000\n Aliveproxy_com(), # noqa; 210\n Freeproxylists_com(), # noqa; 1338\n Webanetlabs_net(), # noqa; 5000\n Maxiproxies_com(), # noqa; 430\n Proxylist_download(), # noqa; 35590\n # # Bad...\n # http://www.proxylist.ro/\n # Provider(url='http://proxydb.net/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS',\n # 'CONNECT:25', 'SOCKS4', 'SOCKS5')),\n # Provider(url='http://www.cybersyndrome.net/pla6.html',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 1100\n # Provider(url='https://www.ip-adress.com/proxy-list',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 57\n # Provider(url='https://www.marcosbl.com/lab/proxies/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 89\n # Provider(url='http://go4free.xyz/Free-Proxy/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 196\n # Provider(url='http://blackstarsecurity.com/proxy-list.txt'), # 7014\n # Provider(url='http://www.get-proxy.net/proxy-archives'), # 519\n # Proxyb_net(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 857\n # Proxz_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n # max_conn=2), # 443\n # Proxynova_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 818\n # _50kproxies_com(), # 822\n # Free_proxy_cz(), # 420\n]" }, { "identifier": "Provider", "path": "proxyhub/providers.py", "snippet": "class Provider:\n \"\"\"Proxy provider.\n\n Provider - a website that publish free public proxy lists.\n\n :param str url: Url of page where to find proxies\n :param tuple proto:\n (optional) List of the types (protocols) that may be supported\n by 
proxies returned by the provider. Then used as :attr:`Proxy.types`\n :param int max_conn:\n (optional) The maximum number of concurrent connections on the provider\n :param int max_tries:\n (optional) The maximum number of attempts to receive response\n :param int timeout:\n (optional) Timeout of a request in seconds\n \"\"\"\n\n _pattern = IPPortPatternGlobal\n\n def __init__(\n self, url=None, proto=(), max_conn=4, max_tries=3, timeout=20, loop=None\n ):\n if url:\n self.domain = urlparse(url).netloc\n self.url = url\n self.proto = proto\n self._max_tries = max_tries\n self._timeout = timeout\n self._session = None\n self._cookies = {}\n self._proxies = set()\n # concurrent connections on the current provider\n self._sem_provider = asyncio.Semaphore(max_conn)\n self._loop = loop or asyncio.get_event_loop()\n\n @property\n def proxies(self):\n \"\"\"Return all found proxies.\n\n :return:\n Set of tuples with proxy hosts, ports and types (protocols)\n that may be supported (from :attr:`.proto`).\n\n For example:\n {('192.168.0.1', '80', ('HTTP', 'HTTPS'), ...)}\n\n :rtype: set\n \"\"\"\n return self._proxies\n\n @proxies.setter\n def proxies(self, new):\n new = [(host, port, self.proto) for host, port in new if port]\n self._proxies.update(new)\n\n async def get_proxies(self):\n \"\"\"Receive proxies from the provider and return them.\n\n :return: :attr:`.proxies`\n \"\"\"\n log.debug('Try to get proxies from %s' % self.domain)\n\n async with aiohttp.ClientSession(\n headers=get_headers(), cookies=self._cookies, loop=self._loop\n ) as self._session:\n await self._pipe()\n\n log.debug(\n '%d proxies received from %s: %s'\n % (len(self.proxies), self.domain, self.proxies)\n )\n return self.proxies\n\n async def _pipe(self):\n await self._find_on_page(self.url)\n\n async def _find_on_pages(self, urls):\n if not urls:\n return\n tasks = []\n if not isinstance(urls[0], dict):\n urls = set(urls)\n for url in urls:\n if isinstance(url, dict):\n tasks.append(self._find_on_page(**url))\n else:\n tasks.append(self._find_on_page(url))\n await asyncio.gather(*tasks)\n\n async def _find_on_page(self, url, data=None, headers=None, method='GET'):\n page = await self.get(url, data=data, headers=headers, method=method)\n oldcount = len(self.proxies)\n try:\n received = self.find_proxies(page)\n except Exception as e:\n received = []\n log.error(\n 'Error when executing find_proxies.'\n 'Domain: %s; Error: %r' % (self.domain, e)\n )\n self.proxies = received\n added = len(self.proxies) - oldcount\n log.debug(\n '%d(%d) proxies added(received) from %s' % (added, len(received), url)\n )\n\n async def get(self, url, data=None, headers=None, method='GET'):\n for _ in range(self._max_tries):\n page = await self._get(url, data=data, headers=headers, method=method)\n if page:\n break\n return page\n\n async def _get(self, url, data=None, headers=None, method='GET'):\n page = ''\n try:\n timeout = aiohttp.ClientTimeout(total=self._timeout)\n async with self._sem_provider, self._session.request(\n method, url, data=data, headers=headers, timeout=timeout\n ) as resp:\n page = await resp.text()\n if resp.status != 200:\n log.debug(\n 'url: %s\\nheaders: %s\\ncookies: %s\\npage:\\n%s'\n % (url, resp.headers, resp.cookies, page)\n )\n raise BadStatusError('Status: %s' % resp.status)\n except (\n UnicodeDecodeError,\n BadStatusError,\n asyncio.TimeoutError,\n aiohttp.ClientOSError,\n aiohttp.ClientResponseError,\n aiohttp.ServerDisconnectedError,\n ) as e:\n page = ''\n log.debug('%s is failed. 
Error: %r;' % (url, e))\n return page\n\n def find_proxies(self, page):\n return self._find_proxies(page)\n\n def _find_proxies(self, page):\n proxies = self._pattern.findall(page)\n return proxies" }, { "identifier": "Proxy", "path": "proxyhub/proxy.py", "snippet": "class Proxy:\n \"\"\"Proxy.\n\n :param str host: IP address of the proxy\n :param int port: Port of the proxy\n :param tuple types:\n (optional) List of types (protocols) which may be supported\n by the proxy and which can be checked to work with the proxy\n :param int timeout:\n (optional) Timeout of a connection and receive a response in seconds\n :param bool verify_ssl:\n (optional) Flag indicating whether to check the SSL certificates.\n Set to True to check ssl certifications\n\n :raises ValueError: If the host not is IP address, or if the port > 65535\n \"\"\"\n\n @classmethod\n async def create(cls, host, *args, **kwargs):\n \"\"\"Asynchronously create a :class:`Proxy` object.\n\n :param str host: A passed host can be a domain or IP address.\n If the host is a domain, try to resolve it\n :param str *args:\n (optional) Positional arguments that :class:`Proxy` takes\n :param str **kwargs:\n (optional) Keyword arguments that :class:`Proxy` takes\n\n :return: :class:`Proxy` object\n :rtype: proxyhub.Proxy\n\n :raises ResolveError: If could not resolve the host\n :raises ValueError: If the port > 65535\n \"\"\" # noqa: W605\n loop = kwargs.pop('loop', None)\n resolver = kwargs.pop('resolver', Resolver(loop=loop))\n try:\n _host = await resolver.resolve(host)\n self = cls(_host, *args, **kwargs)\n except (ResolveError, ValueError) as e:\n log.error('%s:%s: Error at creating: %s' % (host, args[0], e))\n raise\n return self\n\n def __init__(self, host=None, port=None, types=(), timeout=8, verify_ssl=False):\n self.host = host\n if not Resolver.host_is_ip(self.host):\n raise ValueError(\n 'The host of proxy should be the IP address. '\n 'Try Proxy.create() if the host is a domain'\n )\n\n self.port = int(port)\n if self.port > 65535:\n raise ValueError('The port of proxy cannot be greater than 65535')\n\n self.expected_types = set(types) & {\n 'HTTP',\n 'HTTPS',\n 'CONNECT:80',\n 'CONNECT:25',\n 'SOCKS4',\n 'SOCKS5',\n }\n self._timeout = timeout\n self._ssl_context = True if verify_ssl else _ssl._create_unverified_context()\n self._types = {}\n self._is_working = False\n self.stat = {'requests': 0, 'errors': Counter()}\n self._ngtr = None\n self._geo = Resolver.get_ip_info(self.host)\n self._log = []\n self._runtimes = []\n self._schemes = ()\n self._closed = True\n self._reader = {'conn': None, 'ssl': None}\n self._writer = {'conn': None, 'ssl': None}\n\n def __repr__(self):\n \"\"\"Class representation\n e.g. 
<Proxy US 1.12 [HTTP: Anonymous, HTTPS] 10.0.0.1:8080>\n \"\"\"\n tpinfo = []\n order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731\n for tp, lvl in sorted(self.types.items(), key=order):\n s = '{tp}: {lvl}' if lvl else '{tp}'\n s = s.format(tp=tp, lvl=lvl)\n tpinfo.append(s)\n tpinfo = ', '.join(tpinfo)\n return '<Proxy {code} {avg:.2f}s [{types}] {host}:{port}>'.format(\n code=self._geo.code,\n types=tpinfo,\n host=self.host,\n port=self.port,\n avg=self.avg_resp_time,\n )\n\n @property\n def types(self):\n \"\"\"Types (protocols) supported by the proxy.\n\n | Where key is type, value is level of anonymity\n (only for HTTP, for other types level always is None).\n | Available types: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25\n | Available levels: Transparent, Anonymous, High.\n\n :rtype: dict\n \"\"\"\n return self._types\n\n @property\n def is_working(self):\n \"\"\"True if the proxy is working, False otherwise.\n\n :rtype: bool\n \"\"\"\n return self._is_working\n\n @is_working.setter\n def is_working(self, val):\n self._is_working = val\n\n @property\n def writer(self):\n return self._writer.get('ssl') or self._writer.get('conn')\n\n @property\n def reader(self):\n return self._reader.get('ssl') or self._reader.get('conn')\n\n @property\n def priority(self):\n return (self.error_rate, self.avg_resp_time)\n\n @property\n def error_rate(self):\n \"\"\"Error rate: from 0 to 1.\n\n For example: 0.7 = 70% requests ends with error.\n\n :rtype: float\n\n .. versionadded:: 0.2.0\n \"\"\"\n if not self.stat['requests']:\n return 0\n return round(sum(self.stat['errors'].values()) / self.stat['requests'], 2)\n\n @property\n def schemes(self):\n \"\"\"Return supported schemes.\"\"\"\n if not self._schemes:\n _schemes = []\n if self.types.keys() & _HTTP_PROTOS:\n _schemes.append('HTTP')\n if self.types.keys() & _HTTPS_PROTOS:\n _schemes.append('HTTPS')\n self._schemes = tuple(_schemes)\n return self._schemes\n\n @property\n def avg_resp_time(self):\n \"\"\"The average connection/response time.\n\n :rtype: float\n \"\"\"\n if not self._runtimes:\n return 0\n return round(sum(self._runtimes) / len(self._runtimes), 2)\n\n @property\n def avgRespTime(self):\n \"\"\"\n .. deprecated:: 2.0\n Use :attr:`avg_resp_time` instead.\n \"\"\"\n warnings.warn(\n '`avgRespTime` property is deprecated, ' 'use `avg_resp_time` instead.',\n DeprecationWarning,\n )\n return self.avg_resp_time\n\n @property\n def geo(self):\n \"\"\"Geo information about IP address of the proxy.\n\n :return:\n Named tuple with fields:\n * ``code`` - ISO country code\n * ``name`` - Full name of country\n * ``region_code`` - ISO region code\n * ``region_name`` - Full name of region\n * ``city_name`` - Full name of city\n :rtype: collections.namedtuple\n\n .. 
versionchanged:: 0.2.0\n In previous versions return a dictionary, now named tuple.\n \"\"\"\n return self._geo\n\n @property\n def ngtr(self):\n return self._ngtr\n\n @ngtr.setter\n def ngtr(self, proto):\n self._ngtr = NGTRS[proto](self)\n\n def as_json(self):\n \"\"\"Return the proxy's properties in JSON format.\n\n :rtype: dict\n \"\"\"\n info = {\n 'host': self.host,\n 'port': self.port,\n 'geo': {\n 'country': {'code': self._geo.code, 'name': self._geo.name},\n 'region': {\n 'code': self._geo.region_code,\n 'name': self._geo.region_name,\n },\n 'city': self._geo.city_name,\n },\n 'types': [],\n 'avg_resp_time': self.avg_resp_time,\n 'error_rate': self.error_rate,\n }\n\n order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731\n for tp, lvl in sorted(self.types.items(), key=order):\n info['types'].append({'type': tp, 'level': lvl or ''})\n return info\n\n def as_text(self):\n \"\"\"\n Return proxy as host:port\n\n :rtype: str\n \"\"\"\n return \"{}:{}\\n\".format(self.host, self.port)\n\n def log(self, msg, stime=0, err=None):\n ngtr = self.ngtr.name if self.ngtr else 'INFO'\n runtime = time.time() - stime if stime else 0\n log.debug(\n '{h}:{p} [{n}]: {msg}; Runtime: {rt:.2f}'.format(\n h=self.host, p=self.port, n=ngtr, msg=msg, rt=runtime\n )\n )\n trunc = '...' if len(msg) > 58 else ''\n msg = '{msg:.60s}{trunc}'.format(msg=msg, trunc=trunc)\n self._log.append((ngtr, msg, runtime))\n if err:\n self.stat['errors'][err.errmsg] += 1\n if runtime and 'timeout' not in msg:\n self._runtimes.append(runtime)\n\n def get_log(self):\n \"\"\"Proxy log.\n\n :return: The proxy log in format: (negotaitor, msg, runtime)\n :rtype: tuple\n\n .. versionadded:: 0.2.0\n \"\"\"\n return self._log\n\n async def connect(self, ssl=False):\n err = None\n msg = '%s' % 'SSL: ' if ssl else ''\n stime = time.time()\n self.log('%sInitial connection' % msg)\n try:\n if ssl:\n _type = 'ssl'\n sock = self._writer['conn'].get_extra_info('socket')\n params = {\n 'ssl': self._ssl_context,\n 'sock': sock,\n 'server_hostname': self.host,\n }\n else:\n _type = 'conn'\n params = {'host': self.host, 'port': self.port}\n self._reader[_type], self._writer[_type] = await asyncio.wait_for(\n asyncio.open_connection(**params), timeout=self._timeout\n )\n except asyncio.TimeoutError:\n msg += 'Connection: timeout'\n err = ProxyTimeoutError(msg)\n raise err\n except (ConnectionRefusedError, OSError, _ssl.SSLError):\n msg += 'Connection: failed'\n err = ProxyConnError(msg)\n raise err\n # except asyncio.CancelledError:\n # log.debug('Cancelled in proxy.connect()')\n # raise ProxyConnError()\n else:\n msg += 'Connection: success'\n self._closed = False\n finally:\n self.stat['requests'] += 1\n self.log(msg, stime, err=err)\n\n def close(self):\n if self._closed:\n return\n self._closed = True\n if self.writer:\n # try:\n self.writer.close()\n # except RuntimeError:\n # print('Try proxy.close() when loop is closed:',\n # asyncio.get_event_loop()._closed)\n self._reader = {'conn': None, 'ssl': None}\n self._writer = {'conn': None, 'ssl': None}\n self.log('Connection: closed')\n self._ngtr = None\n\n async def send(self, req):\n msg, err = '', None\n _req = req.encode() if not isinstance(req, bytes) else req\n try:\n self.writer.write(_req)\n await self.writer.drain()\n except ConnectionResetError:\n msg = '; Sending: failed'\n err = ProxySendError(msg)\n raise err\n finally:\n self.log('Request: %s%s' % (req, msg), err=err)\n\n async def recv(self, length=0, head_only=False):\n resp, msg, err = b'', '', None\n stime = 
time.time()\n try:\n resp = await asyncio.wait_for(\n self._recv(length, head_only), timeout=self._timeout\n )\n except asyncio.TimeoutError:\n msg = 'Received: timeout'\n err = ProxyTimeoutError(msg)\n raise err\n except (ConnectionResetError, OSError):\n msg = 'Received: failed' # (connection is reset by the peer)\n err = ProxyRecvError(msg)\n raise err\n else:\n msg = 'Received: %s bytes' % len(resp)\n if not resp:\n err = ProxyEmptyRecvError(msg)\n raise err\n finally:\n if resp:\n msg += ': %s' % resp[:12]\n self.log(msg, stime, err=err)\n return resp\n\n async def _recv(self, length=0, head_only=False):\n resp = b''\n if length:\n try:\n resp = await self.reader.readexactly(length)\n except asyncio.IncompleteReadError as e:\n resp = e.partial\n else:\n body_size, body_recv, chunked = 0, 0, None\n while not self.reader.at_eof():\n line = await self.reader.readline()\n resp += line\n if body_size:\n body_recv += len(line)\n if body_recv >= body_size:\n break\n elif chunked and line == b'0\\r\\n':\n break\n elif not body_size and line == b'\\r\\n':\n if head_only:\n break\n headers = parse_headers(resp)\n body_size = int(headers.get('Content-Length', 0))\n if not body_size:\n chunked = headers.get('Transfer-Encoding') == 'chunked'\n return resp" }, { "identifier": "Resolver", "path": "proxyhub/resolver.py", "snippet": "class Resolver:\n \"\"\"Async host resolver based on aiodns.\"\"\"\n\n _cached_hosts = {}\n _ip_hosts = [\n 'https://wtfismyip.com/text',\n 'http://api.ipify.org/',\n 'http://ipinfo.io/ip',\n 'http://ipv4.icanhazip.com/',\n 'http://myexternalip.com/raw',\n 'http://ipinfo.io/ip',\n 'http://ifconfig.io/ip',\n ]\n # the list of resolvers will point a copy of original one\n _temp_host = []\n\n def __init__(self, timeout=5, loop=None):\n self._timeout = timeout\n self._loop = loop or asyncio.get_event_loop()\n self._resolver = aiodns.DNSResolver(loop=self._loop)\n\n @staticmethod\n def host_is_ip(host):\n \"\"\"Check a host is IP address.\"\"\"\n # TODO: add IPv6 support\n try:\n host = '.'.join(f'{int(n)}' for n in host.split('.'))\n ipaddress.IPv4Address(host)\n except (ipaddress.AddressValueError, ValueError):\n return False\n else:\n return True\n\n @staticmethod\n def get_ip_info(ip):\n \"\"\"Return geo information about IP address.\n\n `code` - ISO country code\n `name` - Full name of country\n `region_code` - ISO region code\n `region_name` - Full name of region\n `city_name` - Full name of city\n \"\"\"\n # from pprint import pprint\n try:\n ipInfo = _mmdb_reader.get(ip) or {}\n except (maxminddb.errors.InvalidDatabaseError, ValueError):\n ipInfo = {}\n\n code, name = '--', 'Unknown'\n city_name, region_code, region_name = ('Unknown',) * 3\n if 'country' in ipInfo:\n code = ipInfo['country']['iso_code']\n name = ipInfo['country']['names']['en']\n elif 'continent' in ipInfo:\n code = ipInfo['continent']['code']\n name = ipInfo['continent']['names']['en']\n if 'city' in ipInfo:\n city_name = ipInfo['city']['names']['en']\n if 'subdivisions' in ipInfo:\n region_code = ipInfo['subdivisions'][0]['iso_code']\n region_name = ipInfo['subdivisions'][0]['names']['en']\n return GeoData(code, name, region_code, region_name, city_name)\n\n def _pop_random_ip_host(self):\n host = random.choice(self._temp_host)\n self._temp_host.remove(host)\n return host\n\n async def get_real_ext_ip(self):\n \"\"\"Return real external IP address.\"\"\"\n # make a copy of original one to temp one\n # so original one will stay no change\n self._temp_host = self._ip_hosts.copy()\n while 
self._temp_host:\n try:\n timeout = aiohttp.ClientTimeout(total=self._timeout)\n async with aiohttp.ClientSession(\n timeout=timeout, loop=self._loop\n ) as session, session.get(self._pop_random_ip_host()) as resp:\n ip = await resp.text()\n except asyncio.TimeoutError:\n pass\n else:\n ip = ip.strip()\n if self.host_is_ip(ip):\n log.debug('Real external IP: %s', ip)\n break\n else:\n raise RuntimeError('Could not get the external IP')\n return ip\n\n async def resolve(self, host, port=80, family=None, qtype='A', logging=True):\n \"\"\"Return resolving IP address(es) from host name.\"\"\"\n if self.host_is_ip(host):\n return host\n\n _host = self._cached_hosts.get(host)\n if _host:\n return _host\n\n resp = await self._resolve(host, qtype)\n\n if resp:\n hosts = [\n {\n 'hostname': host,\n 'host': r.host,\n 'port': port,\n 'family': family,\n 'proto': socket.IPPROTO_IP,\n 'flags': socket.AI_NUMERICHOST,\n }\n for r in resp\n ]\n if family:\n self._cached_hosts[host] = hosts\n else:\n self._cached_hosts[host] = hosts[0]['host']\n if logging:\n log.debug('%s: Host resolved: %s' % (host, self._cached_hosts[host]))\n else:\n if logging:\n log.warning('%s: Could not resolve host' % host)\n return self._cached_hosts.get(host)\n\n async def _resolve(self, host, qtype):\n try:\n resp = await asyncio.wait_for(\n self._resolver.query(host, qtype), timeout=self._timeout\n )\n except (aiodns.error.DNSError, asyncio.TimeoutError):\n raise ResolveError\n else:\n return resp" }, { "identifier": "Server", "path": "proxyhub/server.py", "snippet": "class Server:\n \"\"\"Server distributes incoming requests to a pool of found proxies.\"\"\"\n\n def __init__(\n self,\n host,\n port,\n proxies,\n timeout=8,\n max_tries=3,\n min_queue=5,\n min_req_proxy=5,\n max_error_rate=0.5,\n max_resp_time=8,\n prefer_connect=False,\n http_allowed_codes=None,\n backlog=100,\n loop=None,\n **kwargs,\n ):\n self.host = host\n self.port = int(port)\n self._loop = loop or asyncio.get_event_loop()\n self._timeout = timeout\n self._max_tries = max_tries\n self._backlog = backlog\n self._prefer_connect = prefer_connect\n\n self._server = None\n self._connections = {}\n self._proxy_pool = ProxyPool(\n proxies, min_req_proxy, max_error_rate, max_resp_time, min_queue\n )\n self._resolver = Resolver(loop=self._loop)\n self._http_allowed_codes = http_allowed_codes or []\n\n def start(self):\n\n srv = asyncio.start_server(\n self._accept,\n host=self.host,\n port=self.port,\n backlog=self._backlog,\n loop=self._loop,\n )\n self._server = self._loop.run_until_complete(srv)\n\n log.info(\n 'Listening established on {0}'.format(self._server.sockets[0].getsockname())\n )\n\n def stop(self):\n if not self._server:\n return\n for conn in self._connections:\n if not conn.done():\n conn.cancel()\n self._server.close()\n if not self._loop.is_running():\n self._loop.run_until_complete(self._server.wait_closed())\n # Time to close the running futures in self._connections\n self._loop.run_until_complete(asyncio.sleep(0.5))\n self._server = None\n self._loop.stop()\n log.info('Server is stopped')\n\n def _accept(self, client_reader, client_writer):\n def _on_completion(f):\n reader, writer = self._connections.pop(f)\n writer.close()\n log.debug('client: %d; closed' % id(client_reader))\n try:\n exc = f.exception()\n except asyncio.CancelledError:\n log.debug('CancelledError in server._handle:_on_completion')\n exc = None\n if exc:\n if isinstance(exc, NoProxyError):\n self.stop()\n else:\n raise exc\n\n f = 
asyncio.ensure_future(self._handle(client_reader, client_writer))\n f.add_done_callback(_on_completion)\n self._connections[f] = (client_reader, client_writer)\n\n async def _handle(self, client_reader, client_writer):\n log.debug(\n 'Accepted connection from %s' % (client_writer.get_extra_info('peername'),)\n )\n\n request, headers = await self._parse_request(client_reader)\n scheme = self._identify_scheme(headers)\n client = id(client_reader)\n log.debug(\n 'client: %d; request: %s; headers: %s; scheme: %s'\n % (client, request, headers, scheme)\n )\n\n # API for controlling proxyhub2\n if headers['Host'] == 'proxycontrol':\n _api, _operation, _params = headers['Path'].split('/', 5)[3:]\n if _api == 'api':\n if _operation == 'remove':\n proxy_host, proxy_port = _params.split(':', 1)\n self._proxy_pool.remove(proxy_host, int(proxy_port))\n log.debug(\n 'Remove Proxy: client: %d; request: %s; headers: %s; scheme: %s; proxy_host: %s; proxy_port: %s'\n % (client, request, headers, scheme, proxy_host, proxy_port)\n )\n client_writer.write(b'HTTP/1.1 204 No Content\\r\\n\\r\\n')\n await client_writer.drain()\n return\n elif _operation == 'history':\n query_type, url = _params.split(':', 1)\n if query_type == 'url':\n previous_proxy = history.get(\n f\"{client_reader._transport.get_extra_info('peername')[0]}-{url}\"\n )\n if previous_proxy is None:\n client_writer.write(b'HTTP/1.1 204 No Content\\r\\n\\r\\n')\n await client_writer.drain()\n return\n else:\n previous_proxy_bytestring = (\n '{\"proxy\": \"%s\"}' % previous_proxy\n ).encode()\n client_writer.write(b'HTTP/1.1 200 OK\\r\\n')\n client_writer.write(b'Content-Type: application/json\\r\\n')\n client_writer.write(\n f\"Content-Length: {str(len(previous_proxy_bytestring) + 2).encode()}\\r\\n\"\n )\n client_writer.write(b'Access-Control-Allow-Origin: *\\r\\n')\n client_writer.write(\n b'Access-Control-Allow-Credentials: true\\r\\n\\r\\n'\n )\n\n client_writer.write(previous_proxy_bytestring + b'\\r\\n')\n await client_writer.drain()\n return\n\n for attempt in range(self._max_tries):\n stime, err = 0, None\n proxy = await self._proxy_pool.get(scheme)\n proto = self._choice_proto(proxy, scheme)\n log.debug(\n 'client: %d; attempt: %d; proxy: %s; proto: %s'\n % (client, attempt, proxy, proto)\n )\n\n try:\n await proxy.connect()\n\n if proto in ('CONNECT:80', 'SOCKS4', 'SOCKS5'):\n host = headers.get('Host')\n port = headers.get('Port', 80)\n try:\n ip = await self._resolver.resolve(host)\n except ResolveError:\n return\n proxy.ngtr = proto\n await proxy.ngtr.negotiate(host=host, port=port, ip=ip)\n if scheme == 'HTTPS' and proto in ('SOCKS4', 'SOCKS5'):\n client_writer.write(CONNECTED)\n await client_writer.drain()\n else: # HTTP\n await proxy.send(request)\n else: # proto: HTTP & HTTPS\n await proxy.send(request)\n\n history[\n f\"{client_reader._transport.get_extra_info('peername')[0]}-{headers['Path']}\"\n ] = (proxy.host + ':' + str(proxy.port))\n inject_resp_header = {\n 'headers': {'X-Proxy-Info': proxy.host + ':' + str(proxy.port)}\n }\n\n stime = time.time()\n stream = [\n asyncio.ensure_future(\n self._stream(reader=client_reader, writer=proxy.writer)\n ),\n asyncio.ensure_future(\n self._stream(\n reader=proxy.reader,\n writer=client_writer,\n scheme=scheme,\n inject=inject_resp_header,\n )\n ),\n ]\n await asyncio.gather(*stream, loop=self._loop)\n except asyncio.CancelledError:\n log.debug('Cancelled in server._handle')\n break\n except (\n ProxyTimeoutError,\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n 
ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ) as e:\n log.debug('client: %d; error: %r' % (client, e))\n continue\n except ErrorOnStream as e:\n log.debug(\n 'client: %d; error: %r; EOF: %s'\n % (client, e, client_reader.at_eof())\n )\n for task in stream:\n if not task.done():\n task.cancel()\n if client_reader.at_eof() and 'Timeout' in repr(e):\n # Proxy may not be able to receive EOF and weel be raised a\n # TimeoutError, but all the data has already successfully\n # returned, so do not consider this error of proxy\n break\n err = e\n if scheme == 'HTTPS': # SSL Handshake probably failed\n break\n else:\n break\n finally:\n proxy.log(request.decode(), stime, err=err)\n proxy.close()\n self._proxy_pool.put(proxy)\n\n async def _parse_request(self, reader, length=65536):\n request = await reader.read(length)\n headers = parse_headers(request)\n if headers['Method'] == 'POST' and request.endswith(b'\\r\\n\\r\\n'):\n # For aiohttp. POST data returns on second reading\n request += await reader.read(length)\n return request, headers\n\n def _identify_scheme(self, headers):\n if headers['Method'] == 'CONNECT':\n return 'HTTPS'\n else:\n return 'HTTP'\n\n def _choice_proto(self, proxy, scheme):\n if scheme == 'HTTP':\n if self._prefer_connect and ('CONNECT:80' in proxy.types):\n proto = 'CONNECT:80'\n else:\n relevant = {\n 'HTTP',\n 'CONNECT:80',\n 'SOCKS4',\n 'SOCKS5',\n } & proxy.types.keys()\n proto = relevant.pop()\n else: # HTTPS\n relevant = {'HTTPS', 'SOCKS4', 'SOCKS5'} & proxy.types.keys()\n proto = relevant.pop()\n return proto\n\n async def _stream(self, reader, writer, length=65536, scheme=None, inject=None):\n checked = False\n\n try:\n while not reader.at_eof():\n data = await asyncio.wait_for(reader.read(length), self._timeout)\n if not data:\n writer.close()\n break\n elif scheme and not checked:\n self._check_response(data, scheme)\n\n if inject.get('headers') is not None and len(inject['headers']) > 0:\n data = self._inject_headers(data, scheme, inject['headers'])\n\n checked = True\n\n writer.write(data)\n await writer.drain()\n\n except (\n asyncio.TimeoutError,\n ConnectionResetError,\n OSError,\n ProxyRecvError,\n BadStatusError,\n BadResponseError,\n ) as e:\n raise ErrorOnStream(e)\n\n def _check_response(self, data, scheme):\n if scheme == 'HTTP' and self._http_allowed_codes:\n line = data.split(b'\\r\\n', 1)[0].decode()\n try:\n header = parse_status_line(line)\n except BadStatusLine:\n raise BadResponseError\n if header['Status'] not in self._http_allowed_codes:\n raise BadStatusError(\n '%r not in %r' % (header['Status'], self._http_allowed_codes)\n )\n\n def _inject_headers(self, data, scheme, headers):\n custom_lines = []\n\n if scheme == 'HTTP' or scheme == 'HTTPS':\n status_line, rest_lines = data.split(b'\\r\\n', 1)\n custom_lines.append(status_line)\n\n for k, v in headers.items():\n custom_lines.append(('%s: %s' % (k, v)).encode())\n\n custom_lines.append(rest_lines)\n data = b'\\r\\n'.join(custom_lines)\n\n return data" }, { "identifier": "IPPortPatternLine", "path": "proxyhub/utils.py", "snippet": "BASE_DIR = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\nDATA_DIR = os.path.join(BASE_DIR, 'data')\ndef get_headers(rv=False):\ndef get_all_ip(page):\ndef get_status_code(resp, start=9, stop=12):\ndef parse_status_line(line):\ndef parse_headers(headers):\ndef update_geoip_db():" } ]
import asyncio
import io
import signal
import warnings
from collections import Counter, defaultdict
from functools import partial
from pprint import pprint

from .checker import Checker
from .errors import ResolveError
from .providers import PROVIDERS, Provider
from .proxy import Proxy
from .resolver import Resolver
from .server import Server
from .utils import IPPortPatternLine, log
13,041
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [
p if isinstance(p, Provider) else Provider(p)
3
2023-11-05 13:28:57+00:00
16k
TheFunny/ArisuAutoSweeper
module/device/method/minitouch.py
[ { "identifier": "Config", "path": "module/base/decorator.py", "snippet": "class Config:\n \"\"\"\n Decorator that calls different function with a same name according to config.\n\n func_list likes:\n func_list = {\n 'func1': [\n {'options': {'ENABLE': True}, 'func': 1},\n {'options': {'ENABLE': False}, 'func': 1}\n ]\n }\n \"\"\"\n func_list = {}\n\n @classmethod\n def when(cls, **kwargs):\n \"\"\"\n Args:\n **kwargs: Any option in AzurLaneConfig.\n\n Examples:\n @Config.when(USE_ONE_CLICK_RETIREMENT=True)\n def retire_ships(self, amount=None, rarity=None):\n pass\n\n @Config.when(USE_ONE_CLICK_RETIREMENT=False)\n def retire_ships(self, amount=None, rarity=None):\n pass\n \"\"\"\n from module.logger import logger\n options = kwargs\n\n def decorate(func):\n name = func.__name__\n data = {'options': options, 'func': func}\n if name not in cls.func_list:\n cls.func_list[name] = [data]\n else:\n override = False\n for record in cls.func_list[name]:\n if record['options'] == data['options']:\n record['func'] = data['func']\n override = True\n if not override:\n cls.func_list[name].append(data)\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n \"\"\"\n Args:\n self: ModuleBase instance.\n *args:\n **kwargs:\n \"\"\"\n for record in cls.func_list[name]:\n\n flag = [value is None or self.config.__getattribute__(key) == value\n for key, value in record['options'].items()]\n if not all(flag):\n continue\n\n return record['func'](self, *args, **kwargs)\n\n logger.warning(f'No option fits for {name}, using the last define func.')\n return func(self, *args, **kwargs)\n\n return wrapper\n\n return decorate" }, { "identifier": "cached_property", "path": "module/base/decorator.py", "snippet": "class cached_property(Generic[T]):\n \"\"\"\n cached-property from https://github.com/pydanny/cached-property\n Add typing support\n\n A property that is only computed once per instance and then replaces itself\n with an ordinary attribute. Deleting the attribute resets the property.\n Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76\n \"\"\"\n\n def __init__(self, func: Callable[..., T]):\n self.func = func\n\n def __get__(self, obj, cls) -> T:\n if obj is None:\n return self\n\n value = obj.__dict__[self.func.__name__] = self.func(obj)\n return value" }, { "identifier": "del_cached_property", "path": "module/base/decorator.py", "snippet": "def del_cached_property(obj, name):\n \"\"\"\n Delete a cached property safely.\n\n Args:\n obj:\n name (str):\n \"\"\"\n try:\n del obj.__dict__[name]\n except KeyError:\n pass" }, { "identifier": "Timer", "path": "module/base/timer.py", "snippet": "class Timer:\n def __init__(self, limit, count=0):\n \"\"\"\n Args:\n limit (int, float): Timer limit\n count (int): Timer reach confirm count. 
Default to 0.\n When using a structure like this, must set a count.\n Otherwise it goes wrong, if screenshot time cost greater than limit.\n\n if self.appear(MAIN_CHECK):\n if confirm_timer.reached():\n pass\n else:\n confirm_timer.reset()\n\n Also, It's a good idea to set `count`, to make alas run more stable on slow computers.\n Expected speed is 0.35 second / screenshot.\n \"\"\"\n self.limit = limit\n self.count = count\n self._current = 0\n self._reach_count = count\n\n def start(self):\n if not self.started():\n self._current = time.time()\n self._reach_count = 0\n\n return self\n\n def started(self):\n return bool(self._current)\n\n def current(self):\n \"\"\"\n Returns:\n float\n \"\"\"\n if self.started():\n return time.time() - self._current\n else:\n return 0.\n\n def set_current(self, current, count=0):\n self._current = time.time() - current\n self._reach_count = count\n\n def reached(self):\n \"\"\"\n Returns:\n bool\n \"\"\"\n self._reach_count += 1\n return time.time() - self._current > self.limit and self._reach_count > self.count\n\n def reset(self):\n self._current = time.time()\n self._reach_count = 0\n return self\n\n def clear(self):\n self._current = 0\n self._reach_count = self.count\n return self\n\n def reached_and_reset(self):\n \"\"\"\n Returns:\n bool:\n \"\"\"\n if self.reached():\n self.reset()\n return True\n else:\n return False\n\n def wait(self):\n \"\"\"\n Wait until timer reached.\n \"\"\"\n diff = self._current + self.limit - time.time()\n if diff > 0:\n time.sleep(diff)\n\n def show(self):\n from module.logger import logger\n logger.info(str(self))\n\n def __str__(self):\n return f'Timer(limit={round(self.current(), 3)}/{self.limit}, count={self._reach_count}/{self.count})'\n\n __repr__ = __str__" }, { "identifier": "Connection", "path": "module/device/connection.py", "snippet": "class Connection(ConnectionAttr):\n def __init__(self, config):\n \"\"\"\n Args:\n config (AzurLaneConfig, str): Name of the user config under ./config\n \"\"\"\n super().__init__(config)\n if not self.is_over_http:\n self.detect_device()\n\n # Connect\n self.adb_connect(self.serial)\n logger.attr('AdbDevice', self.adb)\n\n # Package\n if self.config.Emulator_PackageName == 'auto':\n self.detect_package()\n else:\n self.package = server_.to_package(self.config.Emulator_PackageName)\n # No set_server cause game client and UI language can be different\n # else:\n # set_server(self.package)\n logger.attr('Server', self.config.Emulator_PackageName)\n server_.server = self.config.Emulator_PackageName\n logger.attr('PackageName', self.package)\n server_.lang = self.config.Emulator_GameLanguage\n logger.attr('Lang', self.config.LANG)\n\n self.check_mumu_app_keep_alive()\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_command(self, cmd, timeout=10):\n \"\"\"\n Execute ADB commands in a subprocess,\n usually to be used when pulling or pushing large files.\n\n Args:\n cmd (list):\n timeout (int):\n\n Returns:\n str:\n \"\"\"\n cmd = list(map(str, cmd))\n cmd = [self.adb_binary, '-s', self.serial] + cmd\n logger.info(f'Execute: {cmd}')\n\n # Use shell=True to disable console window when using GUI.\n # Although, there's still a window when you stop running in GUI, which cause by gooey.\n # To disable it, edit gooey/gui/util/taskkill.py\n\n # No gooey anymore, just shell=False\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False)\n try:\n stdout, stderr = process.communicate(timeout=timeout)\n except subprocess.TimeoutExpired:\n process.kill()\n stdout, stderr = 
process.communicate()\n logger.warning(f'TimeoutExpired when calling {cmd}, stdout={stdout}, stderr={stderr}')\n return stdout\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_command(self, cmd, timeout=10):\n logger.warning(\n f'adb_command() is not available when connecting over http: {self.serial}, '\n )\n raise RequestHumanTakeover\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_shell(self, cmd, stream=False, recvall=True, timeout=10, rstrip=True):\n \"\"\"\n Equivalent to `adb -s <serial> shell <*cmd>`\n\n Args:\n cmd (list, str):\n stream (bool): Return stream instead of string output (Default: False)\n recvall (bool): Receive all data when stream=True (Default: True)\n timeout (int): (Default: 10)\n rstrip (bool): Strip the last empty line (Default: True)\n\n Returns:\n str if stream=False\n bytes if stream=True and recvall=True\n socket if stream=True and recvall=False\n \"\"\"\n if not isinstance(cmd, str):\n cmd = list(map(str, cmd))\n\n if stream:\n result = self.adb.shell(cmd, stream=stream, timeout=timeout, rstrip=rstrip)\n if recvall:\n # bytes\n return recv_all(result)\n else:\n # socket\n return result\n else:\n result = self.adb.shell(cmd, stream=stream, timeout=timeout, rstrip=rstrip)\n result = remove_shell_warning(result)\n # str\n return result\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_shell(self, cmd, stream=False, recvall=True, timeout=10, rstrip=True):\n \"\"\"\n Equivalent to http://127.0.0.1:7912/shell?command={command}\n\n Args:\n cmd (list, str):\n stream (bool): Return stream instead of string output (Default: False)\n recvall (bool): Receive all data when stream=True (Default: True)\n timeout (int): (Default: 10)\n rstrip (bool): Strip the last empty line (Default: True)\n\n Returns:\n str if stream=False\n bytes if stream=True\n \"\"\"\n if not isinstance(cmd, str):\n cmd = list(map(str, cmd))\n\n if stream:\n result = self.u2.shell(cmd, stream=stream, timeout=timeout)\n # Already received all, so `recvall` is ignored\n result = remove_shell_warning(result.content)\n # bytes\n return result\n else:\n result = self.u2.shell(cmd, stream=stream, timeout=timeout).output\n if rstrip:\n result = result.rstrip()\n result = remove_shell_warning(result)\n # str\n return result\n\n def adb_getprop(self, name):\n \"\"\"\n Get system property in Android, same as `getprop <name>`\n\n Args:\n name (str): Property name\n\n Returns:\n str:\n \"\"\"\n return self.adb_shell(['getprop', name]).strip()\n\n @cached_property\n def cpu_abi(self) -> str:\n \"\"\"\n Returns:\n str: arm64-v8a, armeabi-v7a, x86, x86_64\n \"\"\"\n abi = self.adb_getprop('ro.product.cpu.abi')\n if not len(abi):\n logger.error(f'CPU ABI invalid: \"{abi}\"')\n return abi\n\n @cached_property\n def sdk_ver(self) -> int:\n \"\"\"\n Android SDK/API levels, see https://apilevels.com/\n \"\"\"\n sdk = self.adb_getprop('ro.build.version.sdk')\n try:\n return int(sdk)\n except ValueError:\n logger.error(f'SDK version invalid: {sdk}')\n\n return 0\n\n @cached_property\n def is_avd(self):\n if get_serial_pair(self.serial)[0] is None:\n return False\n if 'ranchu' in self.adb_getprop('ro.hardware'):\n return True\n if 'goldfish' in self.adb_getprop('ro.hardware.audio.primary'):\n return True\n return False\n\n def check_mumu_app_keep_alive(self):\n if not self.is_mumu_family:\n return False\n\n res = self.adb_getprop('nemud.app_keep_alive')\n logger.attr('nemud.app_keep_alive', res)\n if res == '':\n # Empry property, might not be a mumu emulator or might be an old mumu\n return True\n elif res == 
'false':\n # Disabled\n return True\n elif res == 'true':\n # https://mumu.163.com/help/20230802/35047_1102450.html\n logger.critical('请在MuMu模拟器设置内关闭 \"后台挂机时保活运行\"')\n raise RequestHumanTakeover\n else:\n logger.warning(f'Invalid nemud.app_keep_alive value: {res}')\n return False\n\n @cached_property\n def _nc_server_host_port(self):\n \"\"\"\n Returns:\n str, int, str, int:\n server_listen_host, server_listen_port, client_connect_host, client_connect_port\n \"\"\"\n # For BlueStacks hyper-v, use ADB reverse\n if self.is_bluestacks_hyperv:\n host = '127.0.0.1'\n logger.info(f'Connecting to BlueStacks hyper-v, using host {host}')\n port = self.adb_reverse(f'tcp:{self.config.REVERSE_SERVER_PORT}')\n return host, port, host, self.config.REVERSE_SERVER_PORT\n # For emulators, listen on current host\n if self.is_emulator or self.is_over_http:\n try:\n host = socket.gethostbyname(socket.gethostname())\n except socket.gaierror as e:\n logger.error(e)\n logger.error(f'Unknown host name: {socket.gethostname()}')\n host = '127.0.0.1'\n if platform.system() == 'Linux' and host == '127.0.1.1':\n host = '127.0.0.1'\n logger.info(f'Connecting to local emulator, using host {host}')\n port = random_port(self.config.FORWARD_PORT_RANGE)\n\n # For AVD instance\n if self.is_avd:\n return host, port, \"10.0.2.2\", port\n\n return host, port, host, port\n # For local network devices, listen on the host under the same network as target device\n if self.is_network_device:\n hosts = socket.gethostbyname_ex(socket.gethostname())[2]\n logger.info(f'Current hosts: {hosts}')\n ip = ipaddress.ip_address(self.serial.split(':')[0])\n for host in hosts:\n if ip in ipaddress.ip_interface(f'{host}/24').network:\n logger.info(f'Connecting to local network device, using host {host}')\n port = random_port(self.config.FORWARD_PORT_RANGE)\n return host, port, host, port\n # For other devices, create an ADB reverse and listen on 127.0.0.1\n host = '127.0.0.1'\n logger.info(f'Connecting to unknown device, using host {host}')\n port = self.adb_reverse(f'tcp:{self.config.REVERSE_SERVER_PORT}')\n return host, port, host, self.config.REVERSE_SERVER_PORT\n\n @cached_property\n def reverse_server(self):\n \"\"\"\n Setup a server on Alas, access it from emulator.\n This will bypass adb shell and be faster.\n \"\"\"\n del_cached_property(self, '_nc_server_host_port')\n host_port = self._nc_server_host_port\n logger.info(f'Reverse server listening on {host_port[0]}:{host_port[1]}, '\n f'client can send data to {host_port[2]}:{host_port[3]}')\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind(host_port[:2])\n server.settimeout(5)\n server.listen(5)\n return server\n\n @cached_property\n def nc_command(self):\n \"\"\"\n Returns:\n list[str]: ['nc'] or ['busybox', 'nc']\n \"\"\"\n sdk = self.sdk_ver\n logger.info(f'sdk_ver: {sdk}')\n if sdk >= 28:\n # Android 9 emulators does not have `nc`, try `busybox nc`\n # BlueStacks Pie (Android 9) has `nc` but cannot send data, try `busybox nc` first\n trial = [\n ['busybox', 'nc'],\n ['nc'],\n ]\n else:\n trial = [\n ['nc'],\n ['busybox', 'nc'],\n ]\n for command in trial:\n # About 3ms\n result = self.adb_shell(command)\n # Result should be command help if success\n # `/system/bin/sh: nc: not found`\n if 'not found' in result:\n continue\n # `/system/bin/sh: busybox: inaccessible or not found\\n`\n if 'inaccessible' in result:\n continue\n logger.attr('nc command', command)\n return command\n\n logger.error('No `netcat` command available, please use screenshot methods without 
`_nc` suffix')\n raise RequestHumanTakeover\n\n def adb_shell_nc(self, cmd, timeout=5, chunk_size=262144):\n \"\"\"\n Args:\n cmd (list):\n timeout (int):\n chunk_size (int): Default to 262144\n\n Returns:\n bytes:\n \"\"\"\n # Server start listening\n server = self.reverse_server\n server.settimeout(timeout)\n # Client send data, waiting for server accept\n # <command> | nc 127.0.0.1 {port}\n cmd += [\"|\", *self.nc_command, *self._nc_server_host_port[2:]]\n stream = self.adb_shell(cmd, stream=True, recvall=False)\n try:\n # Server accept connection\n conn, conn_port = server.accept()\n except socket.timeout:\n output = recv_all(stream, chunk_size=chunk_size)\n logger.warning(str(output))\n raise AdbTimeout('reverse server accept timeout')\n\n # Server receive data\n data = recv_all(conn, chunk_size=chunk_size, recv_interval=0.001)\n\n # Server close connection\n conn.close()\n return data\n\n def adb_exec_out(self, cmd, serial=None):\n cmd.insert(0, 'exec-out')\n return self.adb_command(cmd, serial)\n\n def adb_forward(self, remote):\n \"\"\"\n Do `adb forward <local> <remote>`.\n choose a random port in FORWARD_PORT_RANGE or reuse an existing forward,\n and also remove redundant forwards.\n\n Args:\n remote (str):\n tcp:<port>\n localabstract:<unix domain socket name>\n localreserved:<unix domain socket name>\n localfilesystem:<unix domain socket name>\n dev:<character device name>\n jdwp:<process pid> (remote only)\n\n Returns:\n int: Port\n \"\"\"\n port = 0\n for forward in self.adb.forward_list():\n if forward.serial == self.serial and forward.remote == remote and forward.local.startswith('tcp:'):\n if not port:\n logger.info(f'Reuse forward: {forward}')\n port = int(forward.local[4:])\n else:\n logger.info(f'Remove redundant forward: {forward}')\n self.adb_forward_remove(forward.local)\n\n if port:\n return port\n else:\n # Create new forward\n port = random_port(self.config.FORWARD_PORT_RANGE)\n forward = ForwardItem(self.serial, f'tcp:{port}', remote)\n logger.info(f'Create forward: {forward}')\n self.adb.forward(forward.local, forward.remote)\n return port\n\n def adb_reverse(self, remote):\n port = 0\n for reverse in self.adb.reverse_list():\n if reverse.remote == remote and reverse.local.startswith('tcp:'):\n if not port:\n logger.info(f'Reuse reverse: {reverse}')\n port = int(reverse.local[4:])\n else:\n logger.info(f'Remove redundant forward: {reverse}')\n self.adb_forward_remove(reverse.local)\n\n if port:\n return port\n else:\n # Create new reverse\n port = random_port(self.config.FORWARD_PORT_RANGE)\n reverse = ReverseItem(f'tcp:{port}', remote)\n logger.info(f'Create reverse: {reverse}')\n self.adb.reverse(reverse.local, reverse.remote)\n return port\n\n def adb_forward_remove(self, local):\n \"\"\"\n Equivalent to `adb -s <serial> forward --remove <local>`\n More about the commands send to ADB server, see:\n https://cs.android.com/android/platform/superproject/+/master:packages/modules/adb/SERVICES.TXT\n\n Args:\n local (str): Such as 'tcp:2437'\n \"\"\"\n with self.adb_client._connect() as c:\n list_cmd = f\"host-serial:{self.serial}:killforward:{local}\"\n c.send_command(list_cmd)\n c.check_okay()\n\n def adb_reverse_remove(self, local):\n \"\"\"\n Equivalent to `adb -s <serial> reverse --remove <local>`\n\n Args:\n local (str): Such as 'tcp:2437'\n \"\"\"\n with self.adb_client._connect() as c:\n c.send_command(f\"host:transport:{self.serial}\")\n c.check_okay()\n list_cmd = f\"reverse:killforward:{local}\"\n c.send_command(list_cmd)\n c.check_okay()\n\n def 
adb_push(self, local, remote):\n \"\"\"\n Args:\n local (str):\n remote (str):\n\n Returns:\n str:\n \"\"\"\n cmd = ['push', local, remote]\n return self.adb_command(cmd)\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_connect(self, serial):\n \"\"\"\n Connect to a serial, try 3 times at max.\n If there's an old ADB server running while Alas is using a newer one, which happens on Chinese emulators,\n the first connection is used to kill the other one, and the second is the real connect.\n\n Args:\n serial (str):\n\n Returns:\n bool: If success\n \"\"\"\n # Disconnect offline device before connecting\n for device in self.list_device():\n if device.status == 'offline':\n logger.warning(f'Device {serial} is offline, disconnect it before connecting')\n self.adb_disconnect(serial)\n elif device.status == 'unauthorized':\n logger.error(f'Device {serial} is unauthorized, please accept ADB debugging on your device')\n elif device.status == 'device':\n pass\n else:\n logger.warning(f'Device {serial} is is having a unknown status: {device.status}')\n\n # Skip for emulator-5554\n if 'emulator-' in serial:\n logger.info(f'\"{serial}\" is a `emulator-*` serial, skip adb connect')\n return True\n if re.match(r'^[a-zA-Z0-9]+$', serial):\n logger.info(f'\"{serial}\" seems to be a Android serial, skip adb connect')\n return True\n\n # Try to connect\n for _ in range(3):\n msg = self.adb_client.connect(serial)\n logger.info(msg)\n if 'connected' in msg:\n # Connected to 127.0.0.1:59865\n # Already connected to 127.0.0.1:59865\n return True\n elif 'bad port' in msg:\n # bad port number '598265' in '127.0.0.1:598265'\n logger.error(msg)\n possible_reasons('Serial incorrect, might be a typo')\n raise RequestHumanTakeover\n elif '(10061)' in msg:\n # cannot connect to 127.0.0.1:55555:\n # No connection could be made because the target machine actively refused it. 
(10061)\n logger.info(msg)\n logger.warning('No such device exists, please restart the emulator or set a correct serial')\n raise EmulatorNotRunningError\n\n # Failed to connect\n logger.warning(f'Failed to connect {serial} after 3 trial, assume connected')\n self.detect_device()\n return False\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_connect(self, serial):\n # No adb connect if over http\n return True\n\n def adb_disconnect(self, serial):\n msg = self.adb_client.disconnect(serial)\n if msg:\n logger.info(msg)\n\n del_cached_property(self, 'hermit_session')\n del_cached_property(self, 'droidcast_session')\n del_cached_property(self, 'minitouch_builder')\n del_cached_property(self, 'reverse_server')\n\n def adb_restart(self):\n \"\"\"\n Reboot adb client\n \"\"\"\n logger.info('Restart adb')\n # Kill current client\n self.adb_client.server_kill()\n # Init adb client\n del_cached_property(self, 'adb_client')\n _ = self.adb_client\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_reconnect(self):\n \"\"\"\n Reboot adb client if no device found, otherwise try reconnecting device.\n \"\"\"\n if self.config.Emulator_AdbRestart and len(self.list_device()) == 0:\n # Restart Adb\n self.adb_restart()\n # Connect to device\n self.adb_connect(self.serial)\n self.detect_device()\n else:\n self.adb_disconnect(self.serial)\n self.adb_connect(self.serial)\n self.detect_device()\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_reconnect(self):\n logger.warning(\n f'When connecting a device over http: {self.serial} '\n f'adb_reconnect() is skipped, you may need to restart ATX manually'\n )\n\n def install_uiautomator2(self):\n \"\"\"\n Init uiautomator2 and remove minicap.\n \"\"\"\n logger.info('Install uiautomator2')\n init = u2.init.Initer(self.adb, loglevel=logging.DEBUG)\n # MuMu X has no ro.product.cpu.abi, pick abi from ro.product.cpu.abilist\n if init.abi not in ['x86_64', 'x86', 'arm64-v8a', 'armeabi-v7a', 'armeabi']:\n init.abi = init.abis[0]\n init.set_atx_agent_addr('127.0.0.1:7912')\n try:\n init.install()\n except ConnectionError:\n u2.init.GITHUB_BASEURL = 'http://tool.appetizer.io/openatx'\n init.install()\n self.uninstall_minicap()\n\n def uninstall_minicap(self):\n \"\"\" minicap can't work or will send compressed images on some emulators. 
\"\"\"\n logger.info('Removing minicap')\n self.adb_shell([\"rm\", \"/data/local/tmp/minicap\"])\n self.adb_shell([\"rm\", \"/data/local/tmp/minicap.so\"])\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def restart_atx(self):\n \"\"\"\n Minitouch supports only one connection at a time.\n Restart ATX to kick the existing one.\n \"\"\"\n logger.info('Restart ATX')\n atx_agent_path = '/data/local/tmp/atx-agent'\n self.adb_shell([atx_agent_path, 'server', '--stop'])\n self.adb_shell([atx_agent_path, 'server', '--nouia', '-d', '--addr', '127.0.0.1:7912'])\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def restart_atx(self):\n logger.warning(\n f'When connecting a device over http: {self.serial} '\n f'restart_atx() is skipped, you may need to restart ATX manually'\n )\n\n @staticmethod\n def sleep(second):\n \"\"\"\n Args:\n second(int, float, tuple):\n \"\"\"\n time.sleep(ensure_time(second))\n\n _orientation_description = {\n 0: 'Normal',\n 1: 'HOME key on the right',\n 2: 'HOME key on the top',\n 3: 'HOME key on the left',\n }\n orientation = 0\n\n @retry\n def get_orientation(self):\n \"\"\"\n Rotation of the phone\n\n Returns:\n int:\n 0: 'Normal'\n 1: 'HOME key on the right'\n 2: 'HOME key on the top'\n 3: 'HOME key on the left'\n \"\"\"\n _DISPLAY_RE = re.compile(\n r'.*DisplayViewport{.*valid=true, .*orientation=(?P<orientation>\\d+), .*deviceWidth=(?P<width>\\d+), deviceHeight=(?P<height>\\d+).*'\n )\n output = self.adb_shell(['dumpsys', 'display'])\n\n res = _DISPLAY_RE.search(output, 0)\n\n if res:\n o = int(res.group('orientation'))\n if o in Connection._orientation_description:\n pass\n else:\n o = 0\n logger.warning(f'Invalid device orientation: {o}, assume it is normal')\n else:\n o = 0\n logger.warning('Unable to get device orientation, assume it is normal')\n\n self.orientation = o\n logger.attr('Device Orientation', f'{o} ({Connection._orientation_description.get(o, \"Unknown\")})')\n return o\n\n @retry\n def list_device(self):\n \"\"\"\n Returns:\n SelectedGrids[AdbDeviceWithStatus]:\n \"\"\"\n devices = []\n try:\n with self.adb_client._connect() as c:\n c.send_command(\"host:devices\")\n c.check_okay()\n output = c.read_string_block()\n for line in output.splitlines():\n parts = line.strip().split(\"\\t\")\n if len(parts) != 2:\n continue\n device = AdbDeviceWithStatus(self.adb_client, parts[0], parts[1])\n devices.append(device)\n except ConnectionResetError as e:\n # Happens only on CN users.\n # ConnectionResetError: [WinError 10054] 远程主机强迫关闭了一个现有的连接。\n logger.error(e)\n if '强迫关闭' in str(e):\n logger.critical('无法连接至ADB服务,请关闭UU加速器、原神私服、以及一些劣质代理软件。'\n '它们会劫持电脑上所有的网络连接,包括Alas与模拟器之间的本地连接。')\n return SelectedGrids(devices)\n\n def detect_device(self):\n \"\"\"\n Find available devices\n If serial=='auto' and only 1 device detected, use it\n \"\"\"\n logger.hr('Detect device')\n logger.info('Here are the available devices, '\n 'copy to Alas.Emulator.Serial to use it or set Alas.Emulator.Serial=\"auto\"')\n devices = self.list_device()\n\n # Show available devices\n available = devices.select(status='device')\n for device in available:\n logger.info(device.serial)\n if not len(available):\n logger.info('No available devices')\n\n # Show unavailable devices if having any\n unavailable = devices.delete(available)\n if len(unavailable):\n logger.info('Here are the devices detected but unavailable')\n for device in unavailable:\n logger.info(f'{device.serial} ({device.status})')\n\n # Auto device detection\n if self.config.Emulator_Serial == 'auto':\n if available.count == 0:\n 
logger.critical('No available device found, auto device detection cannot work, '\n 'please set an exact serial in Alas.Emulator.Serial instead of using \"auto\"')\n raise RequestHumanTakeover\n elif available.count == 1:\n logger.info(f'Auto device detection found only one device, using it')\n self.serial = devices[0].serial\n del_cached_property(self, 'adb')\n else:\n logger.critical('Multiple devices found, auto device detection cannot decide which to choose, '\n 'please copy one of the available devices listed above to Alas.Emulator.Serial')\n raise RequestHumanTakeover\n\n # Handle LDPlayer\n # LDPlayer serial jumps between `127.0.0.1:5555+{X}` and `emulator-5554+{X}`\n port_serial, emu_serial = get_serial_pair(self.serial)\n if port_serial and emu_serial:\n # Might be LDPlayer, check connected devices\n port_device = devices.select(serial=port_serial).first_or_none()\n emu_device = devices.select(serial=emu_serial).first_or_none()\n if port_device and emu_device:\n # Paired devices found, check status to get the correct one\n if port_device.status == 'device' and emu_device.status == 'offline':\n self.serial = port_serial\n logger.info(f'LDPlayer device pair found: {port_device}, {emu_device}. '\n f'Using serial: {self.serial}')\n elif port_device.status == 'offline' and emu_device.status == 'device':\n self.serial = emu_serial\n logger.info(f'LDPlayer device pair found: {port_device}, {emu_device}. '\n f'Using serial: {self.serial}')\n elif not devices.select(serial=self.serial):\n # Current serial not found\n if port_device and not emu_device:\n logger.info(f'Current serial {self.serial} not found but paired device {port_serial} found. '\n f'Using serial: {port_serial}')\n self.serial = port_serial\n if not port_device and emu_device:\n logger.info(f'Current serial {self.serial} not found but paired device {emu_serial} found. 
'\n f'Using serial: {emu_serial}')\n self.serial = emu_serial\n\n @retry\n def list_package(self, show_log=True):\n \"\"\"\n Find all packages on device.\n Use dumpsys first for faster.\n \"\"\"\n # 80ms\n if show_log:\n logger.info('Get package list')\n output = self.adb_shell(r'dumpsys package | grep \"Package \\[\"')\n packages = re.findall(r'Package \\[([^\\s]+)\\]', output)\n if len(packages):\n return packages\n\n # 200ms\n if show_log:\n logger.info('Get package list')\n output = self.adb_shell(['pm', 'list', 'packages'])\n packages = re.findall(r'package:([^\\s]+)', output)\n return packages\n\n def list_azurlane_packages(self, show_log=True):\n \"\"\"\n Args:\n show_log:\n\n Returns:\n list[str]: List of package names\n \"\"\"\n packages = self.list_package(show_log=show_log)\n packages = [p for p in packages if p in server_.VALID_PACKAGE]\n return packages\n\n def detect_package(self, set_config=True):\n \"\"\"\n Show all possible packages with the given keyword on this device.\n \"\"\"\n logger.hr('Detect package')\n packages = self.list_azurlane_packages()\n\n # Show packages\n logger.info(f'Here are the available packages in device \"{self.serial}\", '\n f'copy to Alas.Emulator.PackageName to use it')\n if len(packages):\n for package in packages:\n logger.info(package)\n else:\n logger.info(f'No available packages on device \"{self.serial}\"')\n\n # Auto package detection\n if len(packages) == 0:\n logger.critical(f'No Blue Archive package found, '\n f'please confirm Blue Archive has been installed on device \"{self.serial}\"')\n raise RequestHumanTakeover\n if len(packages) == 1:\n logger.info('Auto package detection found only one package, using it')\n self.package = packages[0]\n # Set config\n if set_config:\n self.config.Emulator_PackageName = server_.to_server(self.package)\n # Set server\n # logger.info('Server changed, release resources')\n # set_server(self.package)\n else:\n logger.critical(\n f'Multiple Blue Archive packages found, auto package detection cannot decide which to choose, '\n 'please copy one of the available devices listed above to Alas.Emulator.PackageName')\n raise RequestHumanTakeover" }, { "identifier": "RETRY_TRIES", "path": "module/device/method/utils.py", "snippet": "RETRY_TRIES = 5" }, { "identifier": "retry_sleep", "path": "module/device/method/utils.py", "snippet": "def retry_sleep(trial):\n # First trial\n if trial == 0:\n pass\n # Failed once, fast retry\n elif trial == 1:\n pass\n # Failed twice\n elif trial == 2:\n time.sleep(1)\n # Failed more\n else:\n time.sleep(RETRY_DELAY)" }, { "identifier": "handle_adb_error", "path": "module/device/method/utils.py", "snippet": "def handle_adb_error(e):\n \"\"\"\n Args:\n e (Exception):\n\n Returns:\n bool: If should retry\n \"\"\"\n text = str(e)\n if 'not found' in text:\n # When you call `adb disconnect <serial>`\n # Or when adb server was killed (low possibility)\n # AdbError(device '127.0.0.1:59865' not found)\n logger.error(e)\n return True\n elif 'timeout' in text:\n # AdbTimeout(adb read timeout)\n logger.error(e)\n return True\n elif 'closed' in text:\n # AdbError(closed)\n # Usually after AdbTimeout(adb read timeout)\n # Disconnect and re-connect should fix this.\n logger.error(e)\n return True\n elif 'device offline' in text:\n # AdbError(device offline)\n # When a device that has been connected wirelessly is disconnected passively,\n # it does not disappear from the adb device list,\n # but will be displayed as offline.\n # In many cases, such as disconnection and recovery caused by 
network fluctuations,\n # or after VMOS reboot when running Alas on a phone,\n # the device is still available, but it needs to be disconnected and re-connected.\n logger.error(e)\n return True\n elif 'is offline' in text:\n # RuntimeError: USB device 127.0.0.1:7555 is offline\n # Raised by uiautomator2 when current adb service is killed by another version of adb service.\n logger.error(e)\n return True\n elif 'unknown host service' in text:\n # AdbError(unknown host service)\n # Another version of ADB service started, current ADB service has been killed.\n # Usually because user opened a Chinese emulator, which uses ADB from the Stone Age.\n logger.error(e)\n return True\n else:\n # AdbError()\n logger.exception(e)\n possible_reasons(\n 'If you are using BlueStacks or LD player or WSA, please enable ADB in the settings of your emulator',\n 'Emulator died, please restart emulator',\n 'Serial incorrect, no such device exists or emulator is not running'\n )\n return False" }, { "identifier": "RequestHumanTakeover", "path": "module/exception.py", "snippet": "class RequestHumanTakeover(Exception):\n # Request human takeover\n # Alas is unable to handle such error, probably because of wrong settings.\n pass" }, { "identifier": "ScriptError", "path": "module/exception.py", "snippet": "class ScriptError(Exception):\n # This is likely to be a mistake of developers, but sometimes a random issue\n pass" }, { "identifier": "logger", "path": "module/logger/logger.py", "snippet": "def empty_function(*args, **kwargs):\n def __init__(self, *args, func: Callable[[ConsoleRenderable], None] = None, **kwargs):\n def emit(self, record: logging.LogRecord) -> None:\n def handle(self, record: logging.LogRecord) -> bool:\n def options(self) -> ConsoleOptions:\ndef _set_file_logger(name=pyw_name):\ndef set_file_logger(name=pyw_name):\ndef set_func_logger(func):\ndef _get_renderables(\n self: Console, *objects, sep=\" \", end=\"\\n\", justify=None, emoji=None, markup=None, highlight=None,\n) -> List[ConsoleRenderable]:\ndef print(*objects: ConsoleRenderable, **kwargs):\ndef rule(title=\"\", *, characters=\"─\", style=\"rule.line\", end=\"\\n\", align=\"center\"):\ndef hr(title, level=3):\ndef attr(name, text):\ndef attr_align(name, text, front='', align=22):\ndef show():\ndef error_convert(func):\n def error_wrapper(msg, *args, **kwargs):\nclass RichFileHandler(RichHandler):\nclass RichRenderableHandler(RichHandler):\nclass HTMLConsole(Console):\nclass Highlighter(RegexHighlighter):\nWEB_THEME = Theme({\n \"web.brace\": Style(bold=True),\n \"web.bool_true\": Style(color=\"bright_green\", italic=True),\n \"web.bool_false\": Style(color=\"bright_red\", italic=True),\n \"web.none\": Style(color=\"magenta\", italic=True),\n \"web.path\": Style(color=\"magenta\"),\n \"web.filename\": Style(color=\"bright_magenta\"),\n \"web.str\": Style(color=\"green\", italic=False, bold=False),\n \"web.time\": Style(color=\"cyan\"),\n \"rule.text\": Style(bold=True),\n})" } ]
import asyncio
import json
import re
import socket
import time

import websockets
from functools import wraps
from typing import List

from adbutils.errors import AdbError
from uiautomator2 import _Service

from module.base.decorator import Config, cached_property, del_cached_property
from module.base.timer import Timer
from module.base.utils import *
from module.device.connection import Connection
from module.device.method.utils import RETRY_TRIES, retry_sleep, handle_adb_error
from module.exception import RequestHumanTakeover, ScriptError
from module.logger import logger
12,634
if not self.device.config.DEVICE_OVER_HTTP: # Maximum X and Y coordinates may, but usually do not, match the display size. x, y = int(x / 1280 * max_x), int(y / 720 * max_y) else: # When over http, max_x and max_y are default to 1280 and 720, skip matching display size x, y = int(x), int(y) return x, y def commit(self): """ add minitouch command: 'c\n' """ self.commands.append(Command('c')) return self def reset(self): """ add minitouch command: 'r\n' """ self.commands.append(Command('r')) return self def wait(self, ms=10): """ add minitouch command: 'w <ms>\n' """ self.commands.append(Command('w', ms=ms)) self.delay += ms return self def up(self): """ add minitouch command: 'u <contact>\n' """ self.commands.append(Command('u', contact=self.contact)) return self def down(self, x, y, pressure=100): """ add minitouch command: 'd <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('d', x=x, y=y, contact=self.contact, pressure=pressure)) return self def move(self, x, y, pressure=100): """ add minitouch command: 'm <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('m', x=x, y=y, contact=self.contact, pressure=pressure)) return self def clear(self): """ clear current commands """ self.commands = [] self.delay = 0 def to_minitouch(self) -> str: return ''.join([command.to_minitouch() for command in self.commands]) def to_atx_agent(self) -> List[str]: return [command.to_atx_agent(self.max_x, self.max_y) for command in self.commands] def send(self): return self.device.minitouch_send(builder=self) class MinitouchNotInstalledError(Exception): pass class MinitouchOccupiedError(Exception): pass class U2Service(_Service): def __init__(self, name, u2obj): self.name = name self.u2obj = u2obj self.service_url = self.u2obj.path2url("/services/" + name) def retry(func): @wraps(func) def retry_wrapper(self, *args, **kwargs): """ Args: self (Minitouch): """ init = None for _ in range(RETRY_TRIES): try: if callable(init): retry_sleep(_) init() return func(self, *args, **kwargs) # Can't handle except RequestHumanTakeover: break # When adb server was killed except ConnectionResetError as e: logger.error(e) def init(): self.adb_reconnect() # Emulator closed except ConnectionAbortedError as e: logger.error(e) def init(): self.adb_reconnect() # MinitouchNotInstalledError: Received empty data from minitouch except MinitouchNotInstalledError as e: logger.error(e) def init(): self.install_uiautomator2() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # MinitouchOccupiedError: Timeout when connecting to minitouch except MinitouchOccupiedError as e: logger.error(e) def init(): self.restart_atx() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # AdbError except AdbError as e:
def random_normal_distribution(a, b, n=5): output = np.mean(np.random.uniform(a, b, size=n)) return output def random_theta(): theta = np.random.uniform(0, 2 * np.pi) return np.array([np.sin(theta), np.cos(theta)]) def random_rho(dis): return random_normal_distribution(-dis, dis) def insert_swipe(p0, p3, speed=15, min_distance=10): """ Insert way point from start to end. First generate a cubic bézier curve Args: p0: Start point. p3: End point. speed: Average move speed, pixels per 10ms. min_distance: Returns: list[list[int]]: List of points. Examples: > insert_swipe((400, 400), (600, 600), speed=20) [[400, 400], [406, 406], [416, 415], [429, 428], [444, 442], [462, 459], [481, 478], [504, 500], [527, 522], [545, 540], [560, 557], [573, 570], [584, 582], [592, 590], [597, 596], [600, 600]] """ p0 = np.array(p0) p3 = np.array(p3) # Random control points in Bézier curve distance = np.linalg.norm(p3 - p0) p1 = 2 / 3 * p0 + 1 / 3 * p3 + random_theta() * random_rho(distance * 0.1) p2 = 1 / 3 * p0 + 2 / 3 * p3 + random_theta() * random_rho(distance * 0.1) # Random `t` on Bézier curve, sparse in the middle, dense at start and end segments = max(int(distance / speed) + 1, 5) lower = random_normal_distribution(-85, -60) upper = random_normal_distribution(80, 90) theta = np.arange(lower + 0., upper + 0.0001, (upper - lower) / segments) ts = np.sin(theta / 180 * np.pi) ts = np.sign(ts) * abs(ts) ** 0.9 ts = (ts - min(ts)) / (max(ts) - min(ts)) # Generate cubic Bézier curve points = [] prev = (-100, -100) for t in ts: point = p0 * (1 - t) ** 3 + 3 * p1 * t * (1 - t) ** 2 + 3 * p2 * t ** 2 * (1 - t) + p3 * t ** 3 point = point.astype(int).tolist() if np.linalg.norm(np.subtract(point, prev)) < min_distance: continue points.append(point) prev = point # Delete nearing points if len(points[1:]): distance = np.linalg.norm(np.subtract(points[1:], points[0]), axis=1) mask = np.append(True, distance > min_distance) points = np.array(points)[mask].tolist() else: points = [p0, p3] return points class Command: def __init__( self, operation: str, contact: int = 0, x: int = 0, y: int = 0, ms: int = 10, pressure: int = 100 ): """ See https://github.com/openstf/minitouch#writable-to-the-socket Args: operation: c, r, d, m, u, w contact: x: y: ms: pressure: """ self.operation = operation self.contact = contact self.x = x self.y = y self.ms = ms self.pressure = pressure def to_minitouch(self) -> str: """ String that write into minitouch socket """ if self.operation == 'c': return f'{self.operation}\n' elif self.operation == 'r': return f'{self.operation}\n' elif self.operation == 'd': return f'{self.operation} {self.contact} {self.x} {self.y} {self.pressure}\n' elif self.operation == 'm': return f'{self.operation} {self.contact} {self.x} {self.y} {self.pressure}\n' elif self.operation == 'u': return f'{self.operation} {self.contact}\n' elif self.operation == 'w': return f'{self.operation} {self.ms}\n' else: return '' def to_atx_agent(self, max_x=1280, max_y=720) -> str: """ Dict that send to atx-agent, $DEVICE_URL/minitouch See https://github.com/openatx/atx-agent#minitouch%E6%93%8D%E4%BD%9C%E6%96%B9%E6%B3%95 """ x, y = self.x / max_x, self.y / max_y if self.operation == 'c': out = dict(operation=self.operation) elif self.operation == 'r': out = dict(operation=self.operation) elif self.operation == 'd': out = dict(operation=self.operation, index=self.contact, pressure=self.pressure, xP=x, yP=y) elif self.operation == 'm': out = dict(operation=self.operation, index=self.contact, pressure=self.pressure, xP=x, yP=y) elif 
self.operation == 'u': out = dict(operation=self.operation, index=self.contact) elif self.operation == 'w': out = dict(operation=self.operation, milliseconds=self.ms) else: out = dict() return json.dumps(out) class CommandBuilder: """Build command str for minitouch. You can use this, to custom actions as you wish:: with safe_connection(_DEVICE_ID) as connection: builder = CommandBuilder() builder.down(0, 400, 400, 50) builder.commit() builder.move(0, 500, 500, 50) builder.commit() builder.move(0, 800, 400, 50) builder.commit() builder.up(0) builder.commit() builder.publish(connection) """ DEFAULT_DELAY = 0.05 max_x = 1280 max_y = 720 def __init__(self, device, contact=0, handle_orientation=True): """ Args: device: """ self.device = device self.commands = [] self.delay = 0 self.contact = contact self.handle_orientation = handle_orientation @property def orientation(self): if self.handle_orientation: return self.device.orientation else: return 0 def convert(self, x, y): max_x, max_y = self.device.max_x, self.device.max_y orientation = self.orientation if orientation == 0: pass elif orientation == 1: x, y = 720 - y, x max_x, max_y = max_y, max_x elif orientation == 2: x, y = 1280 - x, 720 - y elif orientation == 3: x, y = y, 1280 - x max_x, max_y = max_y, max_x else: raise ScriptError(f'Invalid device orientation: {orientation}') self.max_x, self.max_y = max_x, max_y if not self.device.config.DEVICE_OVER_HTTP: # Maximum X and Y coordinates may, but usually do not, match the display size. x, y = int(x / 1280 * max_x), int(y / 720 * max_y) else: # When over http, max_x and max_y are default to 1280 and 720, skip matching display size x, y = int(x), int(y) return x, y def commit(self): """ add minitouch command: 'c\n' """ self.commands.append(Command('c')) return self def reset(self): """ add minitouch command: 'r\n' """ self.commands.append(Command('r')) return self def wait(self, ms=10): """ add minitouch command: 'w <ms>\n' """ self.commands.append(Command('w', ms=ms)) self.delay += ms return self def up(self): """ add minitouch command: 'u <contact>\n' """ self.commands.append(Command('u', contact=self.contact)) return self def down(self, x, y, pressure=100): """ add minitouch command: 'd <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('d', x=x, y=y, contact=self.contact, pressure=pressure)) return self def move(self, x, y, pressure=100): """ add minitouch command: 'm <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('m', x=x, y=y, contact=self.contact, pressure=pressure)) return self def clear(self): """ clear current commands """ self.commands = [] self.delay = 0 def to_minitouch(self) -> str: return ''.join([command.to_minitouch() for command in self.commands]) def to_atx_agent(self) -> List[str]: return [command.to_atx_agent(self.max_x, self.max_y) for command in self.commands] def send(self): return self.device.minitouch_send(builder=self) class MinitouchNotInstalledError(Exception): pass class MinitouchOccupiedError(Exception): pass class U2Service(_Service): def __init__(self, name, u2obj): self.name = name self.u2obj = u2obj self.service_url = self.u2obj.path2url("/services/" + name) def retry(func): @wraps(func) def retry_wrapper(self, *args, **kwargs): """ Args: self (Minitouch): """ init = None for _ in range(RETRY_TRIES): try: if callable(init): retry_sleep(_) init() return func(self, *args, **kwargs) # Can't handle except RequestHumanTakeover: break # When adb server was killed except 
ConnectionResetError as e: logger.error(e) def init(): self.adb_reconnect() # Emulator closed except ConnectionAbortedError as e: logger.error(e) def init(): self.adb_reconnect() # MinitouchNotInstalledError: Received empty data from minitouch except MinitouchNotInstalledError as e: logger.error(e) def init(): self.install_uiautomator2() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # MinitouchOccupiedError: Timeout when connecting to minitouch except MinitouchOccupiedError as e: logger.error(e) def init(): self.restart_atx() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # AdbError except AdbError as e:
if handle_adb_error(e):
7
2023-11-01 07:09:45+00:00
16k
BrianPugh/cyclopts
tests/test_help.py
[ { "identifier": "App", "path": "cyclopts/core.py", "snippet": "class App:\n _name: Optional[Tuple[str, ...]] = field(default=None, alias=\"name\", converter=optional_to_tuple_converter)\n\n _help: Optional[str] = field(default=None, alias=\"help\")\n\n usage: Optional[str] = field(default=None)\n\n # Everything below must be kw_only\n\n default_command: Optional[Callable] = field(default=None, converter=_validate_default_command, kw_only=True)\n default_parameter: Optional[Parameter] = field(default=None, kw_only=True)\n\n version: Union[None, str, Callable] = field(factory=_default_version, kw_only=True)\n version_flags: Tuple[str, ...] = field(\n default=[\"--version\"],\n on_setattr=attrs.setters.frozen,\n converter=to_tuple_converter,\n kw_only=True,\n )\n\n show: bool = field(default=True, kw_only=True)\n\n help_flags: Tuple[str, ...] = field(\n default=[\"--help\", \"-h\"],\n on_setattr=attrs.setters.frozen,\n converter=to_tuple_converter,\n kw_only=True,\n )\n\n # This can ONLY ever be Tuple[Union[Group, str], ...] due to converter.\n # The other types is to make mypy happy for Cyclopts users.\n group: Union[Group, str, Tuple[Union[Group, str], ...]] = field(\n default=None, converter=to_tuple_converter, kw_only=True\n )\n\n group_arguments: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_arguments()),\n kw_only=True,\n )\n group_parameters: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_parameters()),\n kw_only=True,\n )\n group_commands: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_commands()),\n kw_only=True,\n )\n\n converter: Optional[Callable] = field(default=None, kw_only=True)\n validator: List[Callable] = field(default=None, converter=to_list_converter, kw_only=True)\n\n ######################\n # Private Attributes #\n ######################\n # Maps CLI-name of a command to a function handle.\n _commands: Dict[str, \"App\"] = field(init=False, factory=dict)\n\n _parents: List[\"App\"] = field(init=False, factory=list)\n\n _meta: \"App\" = field(init=False, default=None)\n _meta_parent: \"App\" = field(init=False, default=None)\n\n def __attrs_post_init__(self):\n if self.help_flags:\n self.command(\n self.help_print,\n name=self.help_flags,\n help_flags=[],\n version_flags=[],\n help=\"Display this message and exit.\",\n )\n if self.version_flags:\n self.command(\n self.version_print,\n name=self.version_flags,\n help_flags=[],\n version_flags=[],\n help=\"Display application version.\",\n )\n\n ###########\n # Methods #\n ###########\n\n @property\n def name(self) -> Tuple[str, ...]:\n \"\"\"Application name(s). 
Dynamically derived if not previously set.\"\"\"\n if self._name:\n return self._name\n elif self.default_command is None:\n name = Path(sys.argv[0]).name\n if name == \"__main__.py\":\n name = _get_root_module_name()\n return (name,)\n else:\n return (_format_name(self.default_command.__name__),)\n\n @property\n def help(self) -> str:\n if self._help is not None:\n return self._help\n elif self.default_command is None:\n # Try and fallback to a meta-app docstring.\n if self._meta is None:\n return \"\"\n else:\n return self.meta.help\n elif self.default_command.__doc__ is None:\n return \"\"\n else:\n return self.default_command.__doc__\n\n @help.setter\n def help(self, value):\n self._help = value\n\n def version_print(self) -> None:\n \"\"\"Print the application version.\"\"\"\n print(self.version() if callable(self.version) else self.version)\n\n def __getitem__(self, key: str) -> \"App\":\n \"\"\"Get the subapp from a command string.\n\n All commands get registered to Cyclopts as subapps.\n The actual function handler is at ``app[key].default_command``.\n \"\"\"\n if self._meta:\n with suppress(KeyError):\n return self.meta[key]\n return self._commands[key]\n\n def __contains__(self, k: str) -> bool:\n if k in self._commands:\n return True\n if self._meta_parent:\n return k in self._meta_parent\n return False\n\n @property\n def meta(self) -> \"App\":\n if self._meta is None:\n self._meta = type(self)(\n group_commands=copy(self.group_commands),\n group_arguments=copy(self.group_arguments),\n group_parameters=copy(self.group_parameters),\n )\n self._meta._meta_parent = self\n return self._meta\n\n def _parse_command_chain(self, tokens):\n command_chain = []\n app = self\n apps = [app]\n unused_tokens = tokens\n\n command_mapping = _combined_meta_command_mapping(app)\n\n for i, token in enumerate(tokens):\n if token in self.help_flags:\n break\n try:\n app = command_mapping[token]\n apps.append(app)\n unused_tokens = tokens[i + 1 :]\n except KeyError:\n break\n command_chain.append(token)\n command_mapping = _combined_meta_command_mapping(app)\n\n return command_chain, apps, unused_tokens\n\n def command(\n self,\n obj: Optional[Callable] = None,\n name: Union[None, str, Iterable[str]] = None,\n **kwargs,\n ) -> Callable:\n \"\"\"Decorator to register a function as a CLI command.\n\n Parameters\n ----------\n obj: Optional[Callable]\n Function or :class:`App` to be registered as a command.\n name: Union[None, str, Iterable[str]]\n Name(s) to register the ``obj`` to.\n If not provided, defaults to:\n\n * If registering an :class:`App`, then the app's name.\n * If registering a function, then the function's name.\n `**kwargs`\n Any argument that :class:`App` can take.\n \"\"\"\n if obj is None: # Called ``@app.command(...)``\n return partial(self.command, name=name, **kwargs)\n\n if isinstance(obj, App):\n app = obj\n\n if app._name is None and name is None:\n raise ValueError(\"Sub-app MUST have a name specified.\")\n\n if kwargs:\n raise ValueError(\"Cannot supplied additional configuration when registering a sub-App.\")\n else:\n validate_command(obj)\n kwargs.setdefault(\"help_flags\", [])\n kwargs.setdefault(\"version_flags\", [])\n if \"group_commands\" not in kwargs:\n kwargs[\"group_commands\"] = copy(self.group_commands)\n if \"group_parameters\" not in kwargs:\n kwargs[\"group_parameters\"] = copy(self.group_parameters)\n if \"group_arguments\" not in kwargs:\n kwargs[\"group_arguments\"] = copy(self.group_arguments)\n app = App(default_command=obj, **kwargs)\n # app.name is 
handled below\n\n if name is None:\n name = app.name\n else:\n app._name = name\n\n for n in to_tuple_converter(name):\n if n in self:\n raise CommandCollisionError(f'Command \"{n}\" already registered.')\n\n # Warning: app._name may not align with command name\n self._commands[n] = app\n\n app._parents.append(self)\n\n return obj\n\n def default(\n self,\n obj: Optional[Callable] = None,\n *,\n converter=None,\n validator=None,\n ):\n \"\"\"Decorator to register a function as the default action handler.\"\"\"\n if obj is None: # Called ``@app.default_command(...)``\n return partial(self.default, converter=converter, validator=validator)\n\n if isinstance(obj, App): # Registering a sub-App\n raise TypeError(\"Cannot register a sub-App to default.\")\n\n if self.default_command is not None:\n raise CommandCollisionError(f\"Default command previously set to {self.default_command}.\")\n\n validate_command(obj)\n self.default_command = obj\n if converter:\n self.converter = converter\n if validator:\n self.validator = validator\n return obj\n\n def parse_known_args(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n ) -> Tuple[Callable, inspect.BoundArguments, List[str]]:\n \"\"\"Interpret arguments into a function, :class:`~inspect.BoundArguments`, and any remaining unknown tokens.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``\n\n Returns\n -------\n command: Callable\n Bare function to execute.\n\n bound: inspect.BoundArguments\n Bound arguments for ``command``.\n\n unused_tokens: List[str]\n Any remaining CLI tokens that didn't get parsed for ``command``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n command_chain, apps, unused_tokens = self._parse_command_chain(tokens)\n command_app = apps[-1]\n\n try:\n parent_app = apps[-2]\n except IndexError:\n parent_app = None\n\n try:\n if command_app.default_command:\n command = command_app.default_command\n resolved_command = ResolvedCommand(\n command,\n _resolve_default_parameter(apps),\n command_app.group_arguments,\n command_app.group_parameters,\n parse_docstring=False,\n )\n # We want the resolved group that ``app`` belongs to.\n if parent_app is None:\n command_groups = []\n else:\n command_groups = _get_command_groups(parent_app, command_app)\n\n bound, unused_tokens = create_bound_arguments(resolved_command, unused_tokens)\n try:\n if command_app.converter:\n bound.arguments = command_app.converter(**bound.arguments)\n for command_group in command_groups:\n if command_group.converter:\n bound.arguments = command_group.converter(**bound.arguments)\n for validator in command_app.validator:\n validator(**bound.arguments)\n for command_group in command_groups:\n for validator in command_group.validator:\n validator(**bound.arguments)\n except (AssertionError, ValueError, TypeError) as e:\n new_exception = ValidationError(value=e.args[0])\n raise new_exception from e\n\n return command, bound, unused_tokens\n else:\n if unused_tokens:\n raise InvalidCommandError(unused_tokens=unused_tokens)\n else:\n # Running the application with no arguments and no registered\n # ``default_command`` will default to ``help_print``.\n command = self.help_print\n bound = inspect.signature(command).bind(tokens=tokens, console=console)\n return command, bound, []\n except CycloptsError as e:\n e.app = command_app\n if command_chain:\n e.command_chain = command_chain\n raise\n\n raise 
NotImplementedError(\"Should never get here.\")\n\n def parse_args(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n print_error: bool = True,\n exit_on_error: bool = True,\n verbose: bool = False,\n ) -> Tuple[Callable, inspect.BoundArguments]:\n \"\"\"Interpret arguments into a function and :class:`~inspect.BoundArguments`.\n\n **Does** handle special flags like \"version\" or \"help\".\n\n Raises\n ------\n UnusedCliTokensError\n If any tokens remain after parsing.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``.\n print_error: bool\n Print a rich-formatted error on error.\n Defaults to ``True``.\n exit_on_error: bool\n If there is an error parsing the CLI tokens invoke ``sys.exit(1)``.\n Otherwise, continue to raise the exception.\n Defaults to ``True``.\n verbose: bool\n Populate exception strings with more information intended for developers.\n Defaults to ``False``.\n\n Returns\n -------\n command: Callable\n Function associated with command action.\n\n bound: inspect.BoundArguments\n Parsed and converted ``args`` and ``kwargs`` to be used when calling ``command``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n meta_parent = self\n\n try:\n # Special flags (help/version) get bubbled up to the root app.\n # The root ``help_print`` will then traverse the meta app linked list.\n\n # The Help Flag is allowed to be anywhere in the token stream.\n help_flag_index = None\n for help_flag in self.help_flags:\n try:\n help_flag_index = tokens.index(help_flag)\n break\n except ValueError:\n pass\n\n if help_flag_index is not None:\n tokens.pop(help_flag_index)\n command = self.help_print\n while meta_parent := meta_parent._meta_parent:\n command = meta_parent.help_print\n bound = inspect.signature(command).bind(tokens, console=console)\n unused_tokens = []\n elif any(flag in tokens for flag in self.version_flags):\n # Version\n command = self.version_print\n while meta_parent := meta_parent._meta_parent:\n command = meta_parent.version_print\n bound = inspect.signature(command).bind()\n unused_tokens = []\n else:\n # Normal parsing\n command, bound, unused_tokens = self.parse_known_args(tokens, console=console)\n if unused_tokens:\n raise UnusedCliTokensError(\n target=command,\n unused_tokens=unused_tokens,\n )\n except CycloptsError as e:\n e.verbose = verbose\n e.root_input_tokens = tokens\n if print_error:\n if console is None:\n console = Console()\n console.print(format_cyclopts_error(e))\n\n if exit_on_error:\n sys.exit(1)\n else:\n raise\n\n return command, bound\n\n def __call__(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n print_error: bool = True,\n exit_on_error: bool = True,\n verbose: bool = False,\n ):\n \"\"\"Interprets and executes a command.\n\n Parameters\n ----------\n tokens : Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``.\n print_error: bool\n Print a rich-formatted error on error.\n Defaults to ``True``.\n exit_on_error: bool\n If there is an error parsing the CLI tokens invoke ``sys.exit(1)``.\n Otherwise, continue to raise the exception.\n Defaults to ``True``.\n verbose: bool\n Populate exception strings with more information intended for developers.\n Defaults to ``False``.\n\n Returns\n -------\n return_value: Any\n The value the parsed command handler returns.\n 
\"\"\"\n tokens = normalize_tokens(tokens)\n command, bound = self.parse_args(\n tokens,\n console=console,\n print_error=print_error,\n exit_on_error=exit_on_error,\n verbose=verbose,\n )\n try:\n return command(*bound.args, **bound.kwargs)\n except Exception as e:\n if PydanticValidationError is not None and isinstance(e, PydanticValidationError):\n if print_error:\n if console is None:\n console = Console()\n console.print(format_cyclopts_error(e))\n\n if exit_on_error:\n sys.exit(1)\n raise\n\n def help_print(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n ) -> None:\n \"\"\"Print the help page.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Tokens to interpret for traversing the application command structure.\n If not provided, defaults to ``sys.argv``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n if console is None:\n console = Console()\n\n command_chain, apps, _ = self._parse_command_chain(tokens)\n executing_app = apps[-1]\n\n # Print the:\n # my-app command COMMAND [ARGS] [OPTIONS]\n if executing_app.usage is None:\n console.print(format_usage(self, command_chain))\n elif executing_app.usage: # i.e. skip empty-string.\n console.print(executing_app.usage + \"\\n\")\n\n # Print the App/Command's Doc String.\n console.print(format_doc(self, executing_app))\n\n def walk_apps():\n # Iterates from deepest to shallowest meta-apps\n meta_list = [] # shallowest to deepest\n meta_list.append(executing_app)\n meta = executing_app\n while (meta := meta._meta) and meta.default_command:\n meta_list.append(meta)\n yield from reversed(meta_list)\n\n panels: Dict[str, Tuple[Group, HelpPanel]] = {}\n # Handle commands first; there's an off chance they may be \"upgraded\"\n # to an argument/parameter panel.\n for subapp in walk_apps():\n # Handle Commands\n for group, elements in groups_from_app(subapp):\n if not group.show:\n continue\n\n try:\n _, command_panel = panels[group.name]\n except KeyError:\n command_panel = HelpPanel(\n format=\"command\",\n title=group.name,\n )\n panels[group.name] = (group, command_panel)\n\n if group.help:\n if command_panel.description:\n command_panel.description += \"\\n\" + group.help\n else:\n command_panel.description = group.help\n\n command_panel.entries.extend(format_command_entries(elements))\n\n # Handle Arguments/Parameters\n for subapp in walk_apps():\n if subapp.default_command:\n command = ResolvedCommand(\n subapp.default_command,\n subapp.default_parameter,\n subapp.group_arguments,\n subapp.group_parameters,\n )\n for group, iparams in command.groups_iparams:\n if not group.show:\n continue\n cparams = [command.iparam_to_cparam[x] for x in iparams]\n try:\n _, existing_panel = panels[group.name]\n except KeyError:\n existing_panel = None\n new_panel = create_parameter_help_panel(group, iparams, cparams)\n\n if existing_panel:\n # An imperfect merging process\n existing_panel.format = \"parameter\"\n existing_panel.entries = new_panel.entries + existing_panel.entries # Commands go last\n if new_panel.description:\n if existing_panel.description:\n existing_panel.description += \"\\n\" + new_panel.description\n else:\n existing_panel.description = new_panel.description\n else:\n panels[group.name] = (group, new_panel)\n\n groups = [x[0] for x in panels.values()]\n help_panels = [x[1] for x in panels.values()]\n\n for help_panel in sort_groups(groups, help_panels)[1]:\n help_panel.remove_duplicates()\n if help_panel.format == \"command\":\n # don't sort format == 
\"parameter\" because order may matter there!\n help_panel.sort()\n console.print(help_panel)\n\n def interactive_shell(\n self,\n prompt: str = \"$ \",\n quit: Union[None, str, Iterable[str]] = None,\n dispatcher: Optional[Dispatcher] = None,\n **kwargs,\n ) -> None:\n \"\"\"Create a blocking, interactive shell.\n\n All registered commands can be executed in the shell.\n\n Parameters\n ----------\n prompt: str\n Shell prompt. Defaults to ``\"$ \"``.\n quit: Union[str, Iterable[str]]\n String or list of strings that will cause the shell to exit and this method to return.\n Defaults to ``[\"q\", \"quit\"]``.\n dispatcher: Optional[Dispatcher]\n Optional function that subsequently invokes the command.\n The ``dispatcher`` function must have signature:\n\n .. code-block:: python\n\n def dispatcher(command: Callable, bound: inspect.BoundArguments) -> Any:\n return command(*bound.args, **bound.kwargs)\n\n The above is the default dispatcher implementation.\n `**kwargs`\n Get passed along to :meth:`parse_args`.\n \"\"\"\n if os.name == \"posix\":\n print(\"Interactive shell. Press Ctrl-D to exit.\")\n else: # Windows\n print(\"Interactive shell. Press Ctrl-Z followed by Enter to exit.\")\n\n if quit is None:\n quit = [\"q\", \"quit\"]\n if isinstance(quit, str):\n quit = [quit]\n\n def default_dispatcher(command, bound):\n return command(*bound.args, **bound.kwargs)\n\n if dispatcher is None:\n dispatcher = default_dispatcher\n\n kwargs.setdefault(\"exit_on_error\", False)\n\n while True:\n try:\n user_input = input(prompt)\n except EOFError:\n break\n\n tokens = normalize_tokens(user_input)\n if not tokens:\n continue\n if tokens[0] in quit:\n break\n\n try:\n command, bound = self.parse_args(tokens, **kwargs)\n dispatcher(command, bound)\n except CycloptsError:\n # Upstream ``parse_args`` already printed the error\n pass\n except Exception:\n print(traceback.format_exc())\n\n def __repr__(self):\n \"\"\"Only shows non-default values.\"\"\"\n non_defaults = {}\n for a in self.__attrs_attrs__: # pyright: ignore[reportGeneralTypeIssues]\n if not a.init:\n continue\n v = getattr(self, a.name)\n # Compare types first because of some weird attribute issues.\n if type(v) != type(a.default) or v != a.default: # noqa: E721\n non_defaults[a.alias] = v\n\n signature = \", \".join(f\"{k}={v!r}\" for k, v in non_defaults.items())\n return f\"{type(self).__name__}({signature})\"" }, { "identifier": "Group", "path": "cyclopts/group.py", "snippet": "class Group:\n name: str = \"\"\n\n help: str = \"\"\n\n # All below parameters are keyword-only\n _show: Optional[bool] = field(default=None, alias=\"show\", kw_only=True)\n\n _sort_key: Any = field(\n default=None,\n alias=\"sort_key\",\n converter=lambda x: NO_USER_SORT_KEY if x is None else x,\n )\n\n converter: Optional[Callable] = field(default=None, kw_only=True)\n\n validator: Tuple[Callable, ...] 
= field(\n default=None,\n converter=lambda x: cast(Tuple[Callable, ...], to_tuple_converter(x)),\n kw_only=True,\n )\n\n default_parameter: Optional[\"Parameter\"] = field(\n default=None,\n validator=_group_default_parameter_must_be_none,\n kw_only=True,\n )\n\n def __str__(self):\n return self.name\n\n @property\n def show(self):\n return bool(self.name) if self._show is None else self._show\n\n @show.setter\n def show(self, value):\n self._show = value\n\n @property\n def sort_key(self):\n return None if self._sort_key is NO_USER_SORT_KEY else self._sort_key\n\n @sort_key.setter\n def sort_key(self, value):\n self._sort_key = value\n\n @classmethod\n def create_default_arguments(cls):\n return cls(\"Arguments\")\n\n @classmethod\n def create_default_parameters(cls):\n return cls(\"Parameters\")\n\n @classmethod\n def create_default_commands(cls):\n return cls(\"Commands\")\n\n @classmethod\n def create_ordered(cls, *args, sort_key=None, **kwargs):\n \"\"\"Create a group with a globally incremented :attr:`~Group.sort_key`.\n\n Used to create a group that will be displayed **after** a previously declared :meth:`Group.create_ordered` group on the help-page.\n\n If a :attr:`~Group.sort_key` is provided, it is **prepended** to the globally incremented counter value (i.e. has priority during sorting).\n \"\"\"\n count = next(_sort_key_counter)\n if sort_key is None:\n sort_key = (NO_USER_SORT_KEY, count)\n elif is_iterable(sort_key):\n sort_key = (tuple(sort_key), count)\n else:\n sort_key = (sort_key, count)\n return cls(*args, sort_key=sort_key, **kwargs)" }, { "identifier": "Parameter", "path": "cyclopts/parameter.py", "snippet": "class Parameter:\n \"\"\"Cyclopts configuration for individual function parameters.\"\"\"\n\n # All documentation has been moved to ``docs/api.rst`` for greater control with attrs.\n\n name: Tuple[str, ...] = field(\n default=None,\n converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),\n )\n\n converter: Callable = field(default=None, converter=attrs.converters.default_if_none(convert))\n\n validator: Tuple[Callable, ...] = field(\n default=(),\n converter=lambda x: cast(Tuple[Callable, ...], to_tuple_converter(x)),\n )\n\n negative: Union[None, Tuple[str, ...]] = field(default=None, converter=optional_to_tuple_converter)\n\n group: Tuple[Union[Group, str], ...] = field(default=None, converter=to_tuple_converter, hash=False)\n\n parse: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n _show: Optional[bool] = field(default=None, alias=\"show\")\n\n show_default: Optional[bool] = field(default=None)\n\n show_choices: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n help: Optional[str] = field(default=None)\n\n show_env_var: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n env_var: Tuple[str, ...] = field(\n default=None,\n converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),\n )\n\n negative_bool: Tuple[str, ...] = field(\n default=None,\n converter=_negative_converter((\"--no-\",)),\n validator=_double_hyphen_validator,\n )\n\n negative_iterable: Tuple[str, ...] 
= field(\n default=None,\n converter=_negative_converter((\"--empty-\",)),\n validator=_double_hyphen_validator,\n )\n\n required: Optional[bool] = field(default=None)\n\n allow_leading_hyphen: bool = field(default=False)\n\n # Populated by the record_attrs_init_args decorator.\n _provided_args: Tuple[str] = field(default=(), init=False, eq=False)\n\n @property\n def show(self):\n return self._show if self._show is not None else self.parse\n\n def get_negatives(self, type_, *names: str) -> Tuple[str, ...]:\n type_ = get_origin(type_) or type_\n\n if self.negative is not None:\n return self.negative\n elif type_ not in (bool, list, set):\n return ()\n\n out = []\n for name in names:\n if name.startswith(\"--\"):\n name = name[2:]\n elif name.startswith(\"-\"):\n # Do not support automatic negation for short flags.\n continue\n else:\n # Should never reach here.\n raise NotImplementedError(\"All parameters should have started with '-' or '--'.\")\n\n negative_prefixes = self.negative_bool if type_ is bool else self.negative_iterable\n\n for negative_prefix in negative_prefixes:\n out.append(f\"{negative_prefix}{name}\")\n return tuple(out)\n\n def __repr__(self):\n \"\"\"Only shows non-default values.\"\"\"\n content = \", \".join(\n [\n f\"{a.alias}={getattr(self, a.name)!r}\"\n for a in self.__attrs_attrs__ # pyright: ignore[reportGeneralTypeIssues]\n if a.alias in self._provided_args\n ]\n )\n return f\"{type(self).__name__}({content})\"\n\n @classmethod\n def combine(cls, *parameters: Optional[\"Parameter\"]) -> \"Parameter\":\n \"\"\"Returns a new Parameter with values of ``parameters``.\n\n Parameters\n ----------\n `*parameters`: Optional[Parameter]\n Parameters who's attributes override ``self`` attributes.\n Ordered from least-to-highest attribute priority.\n \"\"\"\n kwargs = {}\n for parameter in parameters:\n if parameter is None:\n continue\n for a in parameter.__attrs_attrs__: # pyright: ignore[reportGeneralTypeIssues]\n if a.init and a.alias in parameter._provided_args:\n kwargs[a.alias] = getattr(parameter, a.name)\n\n return cls(**kwargs)\n\n @classmethod\n def default(cls) -> \"Parameter\":\n \"\"\"Create a Parameter with all Cyclopts-default values.\n\n This is different than just :class:`Parameter` because the default\n values will be recorded and override all upstream parameter values.\n \"\"\"\n return cls(\n **{a.alias: a.default for a in cls.__attrs_attrs__ if a.init} # pyright: ignore[reportGeneralTypeIssues]\n )" }, { "identifier": "HelpEntry", "path": "cyclopts/help.py", "snippet": "class HelpEntry:\n name: str\n short: str = \"\"\n description: str = \"\"\n required: bool = False" }, { "identifier": "HelpPanel", "path": "cyclopts/help.py", "snippet": "class HelpPanel:\n format: Literal[\"command\", \"parameter\"]\n title: str\n description: str = \"\"\n entries: List[HelpEntry] = field(factory=list)\n\n def remove_duplicates(self):\n seen, out = set(), []\n for item in self.entries:\n if item not in seen:\n seen.add(item)\n out.append(item)\n self.entries = out\n\n def sort(self):\n self.entries.sort(key=lambda x: (x.name.startswith(\"-\"), x.name))\n\n def __rich__(self):\n if not self.entries:\n return _silent\n table = Table.grid(padding=(0, 1))\n text = Text(end=\"\")\n if self.description:\n text.append(self.description + \"\\n\\n\")\n panel = Panel(\n console.Group(text, table),\n box=box.ROUNDED,\n expand=True,\n title_align=\"left\",\n title=self.title,\n )\n\n if self.format == \"command\":\n table.add_column(justify=\"left\", style=\"cyan\")\n 
table.add_column(justify=\"left\")\n\n for entry in self.entries:\n name = entry.name\n if entry.short:\n name += \",\" + entry.short\n table.add_row(name + \" \", entry.description)\n elif self.format == \"parameter\":\n has_short = any(entry.short for entry in self.entries)\n has_required = any(entry.required for entry in self.entries)\n\n if has_required:\n table.add_column(justify=\"left\", width=1, style=\"red bold\") # For asterisk\n table.add_column(justify=\"left\", no_wrap=True, style=\"cyan\") # For option names\n if has_short:\n table.add_column(justify=\"left\", no_wrap=True, style=\"green\") # For short options\n table.add_column(justify=\"left\") # For main help text.\n\n for entry in self.entries:\n row = []\n if has_required:\n if entry.required:\n row.append(\"*\")\n else:\n row.append(\"\")\n row.append(entry.name + \" \")\n if has_short:\n row.append(entry.short + \" \")\n row.append(entry.description)\n table.add_row(*row)\n else:\n raise NotImplementedError\n\n return panel" }, { "identifier": "create_parameter_help_panel", "path": "cyclopts/help.py", "snippet": "def create_parameter_help_panel(group: \"Group\", iparams, cparams: List[Parameter]) -> HelpPanel:\n icparams = [(ip, cp) for ip, cp in zip(iparams, cparams) if cp.show]\n iparams, cparams = (list(x) for x in zip(*icparams))\n\n help_panel = HelpPanel(format=\"parameter\", title=group.name, description=group.help)\n\n for iparam, cparam in icparams:\n assert cparam.name is not None\n type_ = get_hint_parameter(iparam)[0]\n options = list(cparam.name)\n options.extend(cparam.get_negatives(type_, *options))\n\n # Add an all-uppercase name if it's an argument\n if iparam.kind in (iparam.POSITIONAL_ONLY, iparam.POSITIONAL_OR_KEYWORD):\n arg_name = options[0].lstrip(\"-\").upper()\n if arg_name != options[0]:\n options = [arg_name, *options]\n\n short_options, long_options = [], []\n for option in options:\n if _is_short(option):\n short_options.append(option)\n else:\n long_options.append(option)\n\n help_components = []\n\n if cparam.help:\n help_components.append(cparam.help)\n\n if cparam.show_choices:\n choices = _get_choices(type_)\n if choices:\n help_components.append(rf\"[dim]\\[choices: {choices}][/dim]\")\n\n if cparam.show_env_var and cparam.env_var:\n env_vars = \" \".join(cparam.env_var)\n help_components.append(rf\"[dim]\\[env var: {env_vars}][/dim]\")\n\n if not cparam.required and (\n cparam.show_default or (cparam.show_default is None and iparam.default is not None)\n ):\n default = \"\"\n if isclass(type_) and issubclass(type_, Enum):\n default = iparam.default.name.lower().replace(\"_\", \"-\")\n else:\n default = iparam.default\n\n help_components.append(rf\"[dim]\\[default: {default}][/dim]\")\n\n if cparam.required:\n help_components.append(r\"[red][dim]\\[required][/dim][/red]\")\n\n # populate row\n help_panel.entries.append(\n HelpEntry(\n name=\",\".join(long_options),\n description=\" \".join(help_components),\n short=\",\".join(short_options),\n required=bool(cparam.required),\n )\n )\n\n return help_panel" }, { "identifier": "format_command_entries", "path": "cyclopts/help.py", "snippet": "def format_command_entries(elements) -> List:\n entries = []\n for element in elements:\n short_names, long_names = [], []\n for name in element.name:\n short_names.append(name) if _is_short(name) else long_names.append(name)\n entry = HelpEntry(\n name=\",\".join(long_names),\n short=\",\".join(short_names),\n description=docstring_parse(element.help).short_description or \"\",\n )\n if entry not in 
entries:\n entries.append(entry)\n return entries" }, { "identifier": "format_doc", "path": "cyclopts/help.py", "snippet": "def format_doc(root_app, app: \"App\"):\n from cyclopts.core import App # noqa: F811\n\n raw_doc_string = app.help\n\n if not raw_doc_string:\n return _silent\n\n parsed = docstring_parse(raw_doc_string)\n\n components: List[Tuple[str, str]] = []\n if parsed.short_description:\n components.append((parsed.short_description + \"\\n\", \"default\"))\n\n if parsed.long_description:\n components.append((\"\\n\" + parsed.long_description + \"\\n\", \"info\"))\n\n return Text.assemble(*components)" }, { "identifier": "format_usage", "path": "cyclopts/help.py", "snippet": "def format_usage(\n app,\n command_chain: List[str],\n):\n usage = []\n usage.append(\"Usage:\")\n usage.append(app.name[0])\n usage.extend(command_chain)\n\n for command in command_chain:\n app = app[command]\n\n if app._commands:\n usage.append(\"COMMAND\")\n\n if app.default_command:\n to_show = set()\n for parameter in inspect.signature(app.default_command).parameters.values():\n if parameter.kind in (parameter.POSITIONAL_ONLY, parameter.VAR_POSITIONAL, parameter.POSITIONAL_OR_KEYWORD):\n to_show.add(\"[ARGS]\")\n if parameter.kind in (parameter.KEYWORD_ONLY, parameter.VAR_KEYWORD, parameter.POSITIONAL_OR_KEYWORD):\n to_show.add(\"[OPTIONS]\")\n usage.extend(sorted(to_show))\n\n return Text(\" \".join(usage) + \"\\n\", style=\"bold\")" }, { "identifier": "ResolvedCommand", "path": "cyclopts/resolve.py", "snippet": "class ResolvedCommand:\n command: Callable\n groups: List[Group]\n groups_iparams: List[Tuple[Group, List[inspect.Parameter]]]\n iparam_to_groups: ParameterDict\n iparam_to_cparam: ParameterDict\n name_to_iparam: Dict[str, inspect.Parameter]\n\n def __init__(\n self,\n f,\n app_parameter: Optional[Parameter] = None,\n group_arguments: Optional[Group] = None,\n group_parameters: Optional[Group] = None,\n parse_docstring: bool = True,\n ):\n \"\"\"\n ``app_parameter`` implicitly has the command-group parameter already resolved.\n\n Parameters\n ----------\n f: Callable\n Function to resolve annotated :class:`Parameters`.\n app_parameter:\n Default :class:`Parameter` to inherit configuration from.\n group_arguments: Optional[Group]\n Default :class:`Group` for positional-only arguments.\n group_parameters: Optional[Group]\n Default :class:`Group` for non-positional-only arguments.\n parse_docstring: bool\n Parse the docstring to populate Parameter ``help``, if not explicitly set.\n Disable for improved performance if ``help`` won't be used in the resulting :class:`Parameter`.\n \"\"\"\n if group_arguments is None:\n group_arguments = Group.create_default_arguments()\n if group_parameters is None:\n group_parameters = Group.create_default_parameters()\n\n self.command = f\n signature = inspect.signature(f)\n self.name_to_iparam = cast(Dict[str, inspect.Parameter], signature.parameters)\n\n # Get:\n # 1. Fully resolved and created Groups.\n # 2. 
A mapping of inspect.Parameter to those Group objects.\n self.groups, self.iparam_to_groups = _resolve_groups(f, app_parameter, group_arguments, group_parameters)\n\n # Fully Resolve each Cyclopts Parameter\n self.iparam_to_cparam = ParameterDict()\n iparam_to_docstring_cparam = _resolve_docstring(f) if parse_docstring else ParameterDict()\n for iparam, groups in self.iparam_to_groups.items():\n if iparam.kind in (iparam.POSITIONAL_ONLY, iparam.VAR_POSITIONAL):\n # Name is only used for help-string\n names = [iparam.name.upper()]\n else:\n names = [\"--\" + iparam.name.replace(\"_\", \"-\")]\n\n default_name_parameter = Parameter(name=names)\n\n cparam = get_hint_parameter(\n iparam,\n app_parameter,\n *(x.default_parameter for x in groups),\n iparam_to_docstring_cparam.get(iparam),\n default_name_parameter,\n Parameter(required=iparam.default is iparam.empty),\n )[1]\n self.iparam_to_cparam[iparam] = cparam\n\n self.bind = signature.bind_partial if _has_unparsed_parameters(f, app_parameter) else signature.bind\n\n # Create a convenient group-to-iparam structure\n self.groups_iparams = [\n (\n group,\n [iparam for iparam, groups in self.iparam_to_groups.items() if group in groups],\n )\n for group in self.groups\n ]" } ]
import inspect import sys import attrs import pytest from enum import Enum from textwrap import dedent from typing import List, Literal, Optional, Union from typing_extensions import Annotated from typing import Annotated from cyclopts import App, Group, Parameter from cyclopts.help import ( HelpEntry, HelpPanel, create_parameter_help_panel, format_command_entries, format_doc, format_usage, ) from cyclopts.resolve import ResolvedCommand
11,284
@app.command def foo(): pass with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app COMMAND\n\n" def test_format_commands_docstring(app, console): @app.command def foo(): """Docstring for foo. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_no_show(app, console): @app.command def foo(): """Docstring for foo.""" pass @app.command(show=False) def bar(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app,))) with console.capture() as capture: app.help_print([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ foo Docstring for foo. │ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_format_commands_explicit_help(app, console): @app.command(help="Docstring for foo.") def foo(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_explicit_name(app, console): @app.command(name="bar") def foo(): """Docstring for bar. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["bar"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ bar Docstring for bar. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_help_empty(console): app = App(name="foo", version_flags=[], help_flags=[]) with console.capture() as capture: app.help_print(console=console) actual = capture.get() assert actual == "Usage: foo\n\n" @pytest.fixture def capture_format_group_parameters(console, default_function_groups): def inner(cmd):
if sys.version_info < (3, 9): else: @pytest.fixture def app(): return App( name="app", help="App Help String Line 1.", ) def test_empty_help_panel_rich_silent(console): help_panel = HelpPanel(format="command", title="test") with console.capture() as capture: console.print(help_panel) actual = capture.get() assert actual == "" def test_help_default_action(app, console): """No command should default to help.""" with console.capture() as capture: app([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_custom_usage(app, console): app.usage = "My custom usage." with console.capture() as capture: app([], console=console) actual = capture.get() expected = dedent( """\ My custom usage. App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_custom_usage_subapp(app, console): app.command(App(name="foo", usage="My custom usage.")) with console.capture() as capture: app(["foo", "--help"], console=console) actual = capture.get() expected = dedent( """\ My custom usage. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_default_help_flags(console): """Standard help flags.""" app = App(name="app", help="App Help String Line 1.") with console.capture() as capture: app(["--help"], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_usage_empty(console): app = App( name="app", help="App Help String Line 1.", help_flags=[], version_flags=[], ) with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app\n\n" def test_help_format_usage_command(app, console): @app.command def foo(): pass with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app COMMAND\n\n" def test_format_commands_docstring(app, console): @app.command def foo(): """Docstring for foo. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. 
│\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_no_show(app, console): @app.command def foo(): """Docstring for foo.""" pass @app.command(show=False) def bar(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app,))) with console.capture() as capture: app.help_print([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ foo Docstring for foo. │ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_format_commands_explicit_help(app, console): @app.command(help="Docstring for foo.") def foo(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_explicit_name(app, console): @app.command(name="bar") def foo(): """Docstring for bar. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["bar"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ bar Docstring for bar. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_help_empty(console): app = App(name="foo", version_flags=[], help_flags=[]) with console.capture() as capture: app.help_print(console=console) actual = capture.get() assert actual == "Usage: foo\n\n" @pytest.fixture def capture_format_group_parameters(console, default_function_groups): def inner(cmd):
command = ResolvedCommand(cmd, *default_function_groups)
9
2023-11-03 02:24:25+00:00
16k
RoboFlamingo/RoboFlamingo
robot_flamingo/models/factory.py
[ { "identifier": "BCFlamingo", "path": "robot_flamingo/models/flamingo_bc.py", "snippet": "class BCFlamingo(nn.Module):\n def __init__(\n self,\n vision_encoder: nn.Module,\n lang_encoder: nn.Module,\n eoc_token_id: int,\n media_token_id: int,\n vis_dim: int,\n cross_attn_every_n_layers: int = 1,\n use_media_placement_augmentation: bool = False,\n # this is the window size sampled from the episode\n window_size: int = 8,\n use_gripper=False,\n fusion_mode='',\n sep_resampler=False,\n use_state=False,\n use_diff=False,\n diff_horizon=32,\n last_action=False,\n n_timesteps=150,\n state_dim=15,\n use_hist=False,\n debug=False,\n predict_epsilon=True,\n pad_length=-1,\n multi_step_action=1,\n sep_lm_head=False,\n return_feature = False,\n llm='llama_9b',\n pooling='max',\n residual=False,\n tcp_rel=False,\n replan=-1,\n decoder_type='lstm',\n hidden_size=None,\n fwd_pred=False,\n fwd_pred_hand=False,\n refresh=-1\n ):\n \"\"\"\n Args:\n vision_encoder (nn.Module): HF CLIPModel\n lang_encoder (nn.Module): HF causal language model\n eoc_token_id (int): Token id for <|endofchunk|>\n media_token_id (int): Token id for <image>\n vis_dim (int): Dimension of the visual features.\n Visual features are projected to match this shape along the last dimension.\n cross_attn_every_n_layers (int, optional): How often to apply cross attention after transformer layer. Defaults to 1.\n use_media_placement_augmentation (bool, optional): Whether to randomly assign images to the preceding or following text in training. Defaults to False.\n \"\"\"\n super().__init__()\n self.use_gripper = use_gripper\n self.use_state = use_state\n self.fusion_mode = fusion_mode\n self.eoc_token_id = eoc_token_id\n self.media_token_id = media_token_id\n self.use_media_placement_augmentation = use_media_placement_augmentation\n self.vis_dim = vis_dim\n self.window_size = window_size\n self.tcp_rel = tcp_rel\n self.act_step = multi_step_action\n print('window size: {}'.format(window_size))\n self.vision_encoder = vision_encoder\n self.perceiver = PerceiverResampler(dim=self.vis_dim)\n self.sep_resampler = sep_resampler\n self.use_hist = use_hist\n self.lang_encoder = lang_encoder\n self.pad_length = pad_length\n self.replan = replan\n if self.replan != -1:\n self.replan = min(int(replan * self.window_size), 180)\n self.refresh = refresh\n if hasattr(lang_encoder.config, \"d_model\"):\n self.lang_dim = lang_encoder.config.d_model # mpt uses d_model\n else:\n self.lang_dim = lang_encoder.config.hidden_size\n \n # print(self.vis_dim, self.lang_dim)\n \n self.residual = residual\n\n if not debug:\n if 'llama' in llm:\n self.lang_encoder.init_flamingo(\n media_token_id=media_token_id,\n vis_hidden_size=self.vis_dim,\n cross_attn_every_n_layers=cross_attn_every_n_layers,\n use_media_placement_augmentation=self.use_media_placement_augmentation,\n residual=residual,\n )\n else:\n self.lang_encoder.init_flamingo(\n media_token_id=media_token_id,\n lang_hidden_size=self.lang_dim,\n vis_hidden_size=self.vis_dim,\n cross_attn_every_n_layers=cross_attn_every_n_layers,\n gradient_checkpointing=False,\n )\n\n if sep_resampler:\n self.perceiver_gripper = PerceiverResampler(dim=self.vis_dim)\n self.perceiver_gripper.load_state_dict(copy.deepcopy(self.perceiver.state_dict()))\n if use_state:\n self.state_fc = nn.Linear(state_dim, self.vis_dim)\n if use_hist:\n self.frame_embs = nn.Parameter(torch.randn(self.window_size, self.vis_dim))\n # To-do: nn archiecture for actor\n self.llm = llm\n if llm=='llama':\n in_features = 
lang_encoder.lm_head.in_features\n else:\n in_features = self.lang_dim\n self.use_diff = use_diff\n self.decoder_type = decoder_type\n if decoder_type == 'lstm':\n lm_head = DeterministicDecoder(in_features, self.window_size, \n use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, use_state=use_state, return_feature=return_feature, multi_step_action=multi_step_action, pooling=pooling)\n self.lang_encoder.lm_head = lm_head\n elif decoder_type == 'fc':\n if use_hist:\n self.lang_encoder.lm_head = self.action_head = FCDecoder(in_features, self.window_size, \n use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, use_state=use_state, return_feature=return_feature, multi_step_action=multi_step_action)\n elif 'vit_concat' in fusion_mode:\n self.lang_encoder.lm_head = self.action_head = FCDecoder(in_features, self.window_size, \n use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, use_state=use_state, return_feature=return_feature, multi_step_action=multi_step_action)\n else:\n raise NotImplementedError\n elif decoder_type == 'diffusion':\n if use_diff:\n self.diffusion_model = DiffusionDecoder(\n self.action_head.hidden_size, \n self.window_size,\n input_dim=self.action_head.out_features+1,\n n_timesteps=n_timesteps,\n horizon=diff_horizon,\n predict_epsilon=predict_epsilon,\n )\n else:\n raise NotImplementedError\n elif decoder_type=='gpt':\n lm_head = GPTDecoder(in_features, self.window_size, use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, multi_step_action=multi_step_action, pooling=pooling, hidden_size=hidden_size)\n self.lang_encoder.lm_head = self.action_head = lm_head\n else:\n raise NotImplementedError\n\n self.sep_lm_head = sep_lm_head\n if sep_lm_head:\n self.lm_head = self.lang_encoder.lm_head\n self.lang_encoder.lm_head = nn.Identity()\n\n def forward(\n self,\n vision_x: torch.Tensor,\n lang_x: torch.Tensor,\n attention_mask: torch.Tensor = None,\n labels: torch.Tensor = None,\n use_cached_vision_x: bool = False,\n clear_conditioned_layers: bool = True,\n past_key_values=None,\n use_cache: bool = False,\n vision_gripper = None,\n state_tensor = None,\n return_feature = False,\n policy_mask=None\n ):\n \"\"\"\n Forward pass of Flamingo.\n\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W) with F=1\n lang_x (torch.Tensor): Language input ids\n shape (B, T_txt)\n attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.\n labels (torch.Tensor, optional): Labels. Defaults to None.\n clear_conditioned_layers: if True, clear the conditioned layers\n once the foward pass is completed. Set this to false if the\n same set of images will be reused in another subsequent\n forward pass.\n past_key_values: pre-computed values to pass to language model.\n See past_key_values documentation in Hugging Face\n CausalLM models.\n use_cache: whether to use cached key values. See use_cache\n documentation in Hugging Face CausalLM models.\n \"\"\"\n raw_rgb = vision_x.clone()\n raw_gripper = vision_gripper.clone()\n assert (\n vision_x is not None\n ) or use_cached_vision_x, (\n \"Must provide either vision_x or use_cached_vision_x to True.\"\n )\n\n if use_cached_vision_x:\n # Case: use cached; vision_x should be cached and other\n # vision-related inputs should not be provided.\n assert (\n vision_x is None\n ), \"Expect vision_x to be None when use_cached_vision_x is True.\"\n assert self.lang_encoder.is_conditioned()\n\n else:\n # Case: do not use caching (i.e. 
this is a standard forward pass);\n if self.use_hist:\n self._encode_history_vision_post_fusion(vision_x, vision_gripper, state_tensor)\n else:\n if not self.use_gripper or self.fusion_mode == 'two_way':\n vision_x = self._encode_vision_x(vision_x=vision_x)\n else:\n if self.fusion_mode == 'pre':\n self._encode_multi_vision_pre_fusion(vision_x, vision_gripper, state_tensor)\n elif self.fusion_mode == 'post':\n self._encode_multi_vision_post_fusion(vision_x, vision_gripper, state_tensor)\n elif self.fusion_mode == 'vit_concat':\n self._encode_history_vision_fc_post(vision_x, vision_gripper, state_tensor)\n \n if 'llama' in self.llm:\n output = self.lang_encoder(\n input_ids=lang_x,\n attention_mask=attention_mask,\n # labels=labels, # 不输入label,程序就不会计算loss\n past_key_values=past_key_values,\n use_cache=use_cache,\n )\n else:\n output = self.lang_encoder(\n input_ids=lang_x,\n attention_mask=attention_mask\n )\n \n if self.sep_lm_head:\n output_llm = output.logits\n output_lm_head = self.lm_head(output_llm, state_tensor=state_tensor, return_feature=return_feature)\n output.logits = output_lm_head\n \n if clear_conditioned_layers:\n self.lang_encoder.clear_conditioned_layers()\n\n # action_seq = self.action_head(vision_x)\n return output\n\n # Generate function with actor for text time adpatation\n def generate(\n self,\n vision_x: torch.Tensor,\n lang_x: torch.Tensor,\n attention_mask: torch.Tensor = None,\n num_beams=1,\n max_new_tokens=None,\n temperature=1.0,\n top_k=0,\n top_p=1.0,\n no_repeat_ngram_size=0,\n prefix_allowed_tokens_fn=None,\n length_penalty=1.0,\n num_return_sequences=1,\n do_sample=False,\n early_stopping=False,\n ):\n \"\"\"\n Generate text conditioned on vision and language inputs.\n\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n images in the same chunk are collated along T_img, and frames are collated along F\n currently only F=1 is supported (single-frame videos)\n lang_x (torch.Tensor): Language input\n shape (B, T_txt)\n max_length (int, optional): Maximum length of the output. Defaults to None.\n attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.\n num_beams (int, optional): Number of beams. Defaults to 1.\n max_new_tokens (int, optional): Maximum new tokens. Defaults to None.\n temperature (float, optional): Temperature. Defaults to 1.0.\n top_k (int, optional): Top k. Defaults to 0.\n top_p (float, optional): Top p. Defaults to 1.0.\n no_repeat_ngram_size (int, optional): No repeat ngram size. Defaults to 0.\n length_penalty (float, optional): Length penalty. Defaults to 1.0.\n num_return_sequences (int, optional): Number of return sequences. Defaults to 1.\n do_sample (bool, optional): Do sample. Defaults to False.\n early_stopping (bool, optional): Early stopping. 
Defaults to False.\n Returns:\n torch.Tensor: lang_x with generated tokens appended to it\n \"\"\"\n if num_beams > 1:\n vision_x = vision_x.repeat_interleave(num_beams, dim=0)\n\n self._encode_vision_x(vision_x=vision_x)\n\n output = self.lang_encoder.generate(\n lang_x,\n attention_mask=attention_mask,\n eos_token_id=self.eoc_token_id,\n num_beams=num_beams,\n max_new_tokens=max_new_tokens,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n no_repeat_ngram_size=no_repeat_ngram_size,\n length_penalty=length_penalty,\n num_return_sequences=num_return_sequences,\n do_sample=do_sample,\n early_stopping=early_stopping,\n )\n\n self.lang_encoder.clear_conditioned_layers()\n return output\n\n def _encode_vision_x(self, vision_x: torch.Tensor):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n\n assert vision_x.ndim == 6, \"vision_x should be of shape (b, T_img, F, C, H, W)\"\n b, T, F = vision_x.shape[:3]\n assert F == 1, \"Only single frame supported\"\n\n vision_x = rearrange(vision_x, \"b T F c h w -> (b T F) c h w\")\n with torch.no_grad():\n vision_x = self.vision_encoder.visual(vision_x)[1]\n vision_x = rearrange(vision_x, \"(b T F) v d -> b T F v d\", b=b, T=T, F=F)\n\n vision_x = self.perceiver(vision_x) # reshapes to (b, T, n, d)\n\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_vision(self, vision_x: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n\n assert vision_x.ndim == 6, \"vision_x should be of shape (b, T_img, F, C, H, W)\"\n b, T, F = vision_x.shape[:3]\n assert F == 1, \"Only single frame supported\"\n\n vision_x = rearrange(vision_x, \"b T F c h w -> (b T F) c h w\")\n with torch.no_grad():\n vision_x = self.vision_encoder.visual(vision_x)[1]\n vision_x = rearrange(vision_x, \"(b T F) v d -> b T F v d\", b=b, T=T, F=F)\n return vision_x\n\n def _encode_multi_vision_pre_fusion(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=3)\n\n vision_x = self.perceiver(vision_x) # reshapes to (b, T, n, 
d)\n\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_multi_vision_post_fusion(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n vision_rgb = self.perceiver(vision_rgb)\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=2) # reshapes to (b, T, 2*n, d)\n if self.use_state and state_tensor is not None:\n # state_tensor = state_tensor.double()\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=2) # reshapes to (b, T, 2*n+1, d)\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_multi_vision_two_way(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n vision_rgb = self.perceiver(vision_rgb)\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=0) # reshapes to (b, T, 2*n, d)\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=0) # reshapes to (b, T, 2*n+1, d)\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_history_vision_post_fusion(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n bs = int(vision_rgb.shape[0] // self.window_size)\n vision_rgb = 
vision_rgb.view(bs, self.window_size, *vision_rgb.shape[1:])\n _, _, T, p, v_tok, dim = vision_rgb.shape[:6]\n frame_embs = repeat(self.frame_embs, 'F d -> b F T p v d', b=bs, T=T, p=p, v=v_tok)\n vision_rgb = vision_rgb + frame_embs\n vision_rgb = rearrange(vision_rgb, 'b F T p v d -> (b F) T p v d')\n vision_rgb = self.perceiver(vision_rgb)\n\n vision_gripper = vision_gripper.view(vision_gripper.shape[0] // self.window_size, self.window_size,\n *vision_gripper.shape[1:])\n frame_embs = repeat(self.frame_embs, 'F d -> b F T p v d', b=bs, T=T, p=p, v=v_tok)\n vision_gripper = vision_gripper + frame_embs\n vision_gripper = rearrange(vision_gripper, 'b F T p v d -> (b F) T p v d')\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=2) # reshapes to (b, T, 2*n, d)\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=2) # reshapes to (b, T, 2*n+1, d)\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n \n def _encode_history_vision_fc_post(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n bs = int(vision_rgb.shape[0] // self.window_size)\n vision_rgb = self._encode_vision(vision_rgb)\n vision_rgb = self.perceiver(vision_rgb) # BxL, T, n, d\n vision_rgb = vision_rgb.view(-1, self.window_size, *vision_rgb.shape[1:]) # B, L, T, n, d\n vision_rgb = rearrange(vision_rgb, 'b L T n d -> b T (n L) d')\n\n vision_gripper = self._encode_vision(vision_gripper)\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n vision_gripper = vision_gripper.view(-1, self.window_size, *vision_gripper.shape[1:]) # B, L, T, n, d\n vision_gripper = rearrange(vision_gripper, 'b L T n d -> b T (n L) d')\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=2)\n\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=2) # reshapes to (b, T, 2*n+1, d)\n \n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x" }, { "identifier": "MPTFlamingo", "path": "robot_flamingo/models/flamingo_mpt.py", "snippet": "class MPTFlamingo(nn.Module):\n def __init__(\n self,\n vision_encoder: nn.Module,\n lang_encoder: nn.Module,\n eoc_token_id: int,\n media_token_id: int,\n vis_dim: int,\n cross_attn_every_n_layers: int = 1,\n use_media_placement_augmentation: bool = False,\n # this is the window size sampled from the episode\n window_size: int = 8,\n use_gripper=False,\n fusion_mode='',\n sep_resampler=False,\n use_state=False,\n use_diff=False,\n diff_horizon=32,\n last_action=False,\n n_timesteps=150,\n state_dim=15,\n use_hist=False,\n debug=False,\n predict_epsilon=True,\n 
pad_length=-1,\n multi_step_action=1,\n sep_lm_head=False,\n return_feature = False,\n llm='llama',\n pooling='max',\n residual=False,\n tcp_rel=False,\n replan=-1,\n decoder_type='lstm',\n hidden_size=None,\n fwd_pred=False,\n fwd_pred_hand=False,\n global_latent=10,\n no_image_patch=False,\n refresh=-1\n ):\n \"\"\"\n Args:\n vision_encoder (nn.Module): HF CLIPModel\n lang_encoder (nn.Module): HF causal language model\n eoc_token_id (int): Token id for <|endofchunk|>\n media_token_id (int): Token id for <image>\n vis_dim (int): Dimension of the visual features.\n Visual features are projected to match this shape along the last dimension.\n cross_attn_every_n_layers (int, optional): How often to apply cross attention after transformer layer. Defaults to 1.\n use_media_placement_augmentation (bool, optional): Whether to randomly assign images to the preceding or following text in training. Defaults to False.\n \"\"\"\n super().__init__()\n\n self.use_gripper = use_gripper\n self.use_state = use_state\n self.fusion_mode = fusion_mode\n self.eoc_token_id = eoc_token_id\n self.media_token_id = media_token_id\n self.use_media_placement_augmentation = use_media_placement_augmentation\n self.vis_dim = vis_dim\n self.window_size = window_size\n self.tcp_rel = tcp_rel\n self.act_step = multi_step_action\n print('window size: {}'.format(window_size))\n self.vision_encoder = vision_encoder\n self.perceiver = PerceiverResampler(dim=self.vis_dim)\n self.sep_resampler = sep_resampler\n self.use_hist = use_hist\n self.lang_encoder = lang_encoder\n self.pad_length = pad_length\n self.replan = replan\n if self.replan != -1:\n self.replan = min(int(replan * self.window_size), 180)\n self.refresh = refresh\n if hasattr(lang_encoder.config, \"d_model\"):\n self.lang_dim = lang_encoder.config.d_model # mpt uses d_model\n else:\n self.lang_dim = lang_encoder.config.hidden_size\n\n self.residual = residual\n print(self.vis_dim, self.lang_dim)\n print(lang_encoder.config)\n if not debug:\n if 'llama' in llm:\n self.lang_encoder.init_flamingo(\n media_token_id=media_token_id,\n vis_hidden_size=self.vis_dim,\n cross_attn_every_n_layers=cross_attn_every_n_layers,\n use_media_placement_augmentation=self.use_media_placement_augmentation,\n residual=residual,\n )\n else:\n self.lang_encoder.init_flamingo(\n media_token_id=media_token_id,\n lang_hidden_size=self.lang_dim,\n vis_hidden_size=self.vis_dim,\n cross_attn_every_n_layers=cross_attn_every_n_layers,\n gradient_checkpointing=False,\n )\n\n if sep_resampler:\n self.perceiver_gripper = PerceiverResampler(dim=self.vis_dim)\n self.perceiver_gripper.load_state_dict(copy.deepcopy(self.perceiver.state_dict()))\n if use_state:\n self.state_fc = nn.Linear(state_dim, self.vis_dim)\n if use_hist:\n self.frame_embs = nn.Parameter(torch.randn(self.window_size, self.vis_dim))\n # To-do: nn archiecture for actor\n self.llm = llm\n if llm=='llama':\n in_features = lang_encoder.lm_head.in_features\n else:\n in_features = self.lang_dim\n self.use_diff = use_diff\n self.decoder_type = decoder_type\n if decoder_type == 'lstm':\n lm_head = DeterministicDecoder(in_features, self.window_size, \n use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, use_state=use_state, return_feature=return_feature, multi_step_action=multi_step_action, pooling=pooling)\n self.lang_encoder.lm_head = lm_head\n elif decoder_type == 'fc':\n if use_hist:\n self.lang_encoder.lm_head = self.action_head = FCDecoder(in_features, self.window_size, \n use_diff=use_diff, last_action=last_action, 
fusion_mode=fusion_mode, use_state=use_state, return_feature=return_feature, multi_step_action=multi_step_action)\n elif 'vit_concat' in fusion_mode:\n self.lang_encoder.lm_head = self.action_head = FCDecoder(in_features, self.window_size, \n use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, use_state=use_state, return_feature=return_feature, multi_step_action=multi_step_action)\n else:\n raise NotImplementedError\n elif decoder_type == 'diffusion':\n if use_diff:\n self.diffusion_model = DiffusionDecoder(\n self.action_head.hidden_size, \n self.window_size,\n input_dim=self.action_head.out_features+1,\n n_timesteps=n_timesteps,\n horizon=diff_horizon,\n predict_epsilon=predict_epsilon,\n )\n else:\n raise NotImplementedError\n elif decoder_type=='gpt':\n lm_head = GPTDecoder(in_features, self.window_size, use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, multi_step_action=multi_step_action, pooling=pooling, hidden_size=hidden_size)\n self.lang_encoder.lm_head = self.action_head = lm_head\n else:\n raise NotImplementedError\n \n sep_lm_head = True\n self.sep_lm_head = sep_lm_head\n if sep_lm_head:\n self.lm_head = self.lang_encoder.lm_head\n self.lang_encoder.lm_head = nn.Identity()\n\n def forward(\n self,\n vision_x: torch.Tensor,\n lang_x: torch.Tensor,\n attention_mask: torch.Tensor = None,\n labels: torch.Tensor = None,\n use_cached_vision_x: bool = False,\n clear_conditioned_layers: bool = True,\n past_key_values=None,\n use_cache: bool = False,\n vision_gripper = None,\n state_tensor = None,\n return_feature = False,\n policy_mask=None\n ):\n \"\"\"\n Forward pass of Flamingo.\n\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W) with F=1\n lang_x (torch.Tensor): Language input ids\n shape (B, T_txt)\n attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.\n labels (torch.Tensor, optional): Labels. Defaults to None.\n clear_conditioned_layers: if True, clear the conditioned layers\n once the foward pass is completed. Set this to false if the\n same set of images will be reused in another subsequent\n forward pass.\n past_key_values: pre-computed values to pass to language model.\n See past_key_values documentation in Hugging Face\n CausalLM models.\n use_cache: whether to use cached key values. See use_cache\n documentation in Hugging Face CausalLM models.\n \"\"\"\n raw_rgb = vision_x.clone()\n raw_gripper = vision_gripper.clone()\n assert (\n vision_x is not None\n ) or use_cached_vision_x, (\n \"Must provide either vision_x or use_cached_vision_x to True.\"\n )\n\n if use_cached_vision_x:\n # Case: use cached; vision_x should be cached and other\n # vision-related inputs should not be provided.\n assert (\n vision_x is None\n ), \"Expect vision_x to be None when use_cached_vision_x is True.\"\n assert self.lang_encoder.is_conditioned()\n\n else:\n # Case: do not use caching (i.e. 
this is a standard forward pass);\n if self.use_hist:\n self._encode_history_vision_post_fusion(vision_x, vision_gripper)\n else:\n if not self.use_gripper or self.fusion_mode == 'two_way':\n vision_x = self._encode_vision_x(vision_x=vision_x)\n else:\n if self.fusion_mode == 'pre':\n self._encode_multi_vision_pre_fusion(vision_x, vision_gripper)\n elif self.fusion_mode == 'post':\n self._encode_multi_vision_post_fusion(vision_x, vision_gripper)\n elif self.fusion_mode == 'vit_concat':\n self._encode_history_vision_fc_post(vision_x, vision_gripper)\n \n output = self.lang_encoder(\n input_ids=lang_x,\n attention_mask=attention_mask.bool(),\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_hidden_states=True\n )\n\n output_hs = output.hidden_states[-1]\n output_hs = self.lm_head(output_hs, state_tensor=state_tensor, return_feature=return_feature)\n output.logits = output_hs\n \n return output\n\n def _encode_vision_x(self, vision_x: torch.Tensor):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n\n assert vision_x.ndim == 6, \"vision_x should be of shape (b, T_img, F, C, H, W)\"\n b, T, F = vision_x.shape[:3]\n assert F == 1, \"Only single frame supported\"\n\n vision_x = rearrange(vision_x, \"b T F c h w -> (b T F) c h w\")\n with torch.no_grad():\n vision_x = self.vision_encoder.visual(vision_x)[1]\n vision_x = rearrange(vision_x, \"(b T F) v d -> b T F v d\", b=b, T=T, F=F)\n\n vision_x = self.perceiver(vision_x) # reshapes to (b, T, n, d)\n\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_vision(self, vision_x: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n\n assert vision_x.ndim == 6, \"vision_x should be of shape (b, T_img, F, C, H, W)\"\n b, T, F = vision_x.shape[:3]\n assert F == 1, \"Only single frame supported\"\n\n vision_x = rearrange(vision_x, \"b T F c h w -> (b T F) c h w\")\n with torch.no_grad():\n vision_x = self.vision_encoder.visual(vision_x)[1]\n vision_x = rearrange(vision_x, \"(b T F) v d -> b T F v d\", b=b, T=T, F=F)\n return vision_x\n\n def _encode_multi_vision_pre_fusion(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n 
vision_gripper = self._encode_vision(vision_gripper)\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=3)\n\n vision_x = self.perceiver(vision_x) # reshapes to (b, T, n, d)\n\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_multi_vision_post_fusion(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n vision_rgb = self.perceiver(vision_rgb)\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=2) # reshapes to (b, T, 2*n, d)\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=2) # reshapes to (b, T, 2*n+1, d)\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_multi_vision_two_way(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n vision_rgb = self.perceiver(vision_rgb)\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=0) # reshapes to (b, T, 2*n, d)\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=0) # reshapes to (b, T, 2*n+1, d)\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_history_vision_post_fusion(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = 
self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n bs = int(vision_rgb.shape[0] // self.window_size)\n vision_rgb = vision_rgb.view(bs, self.window_size, *vision_rgb.shape[1:])\n _, _, T, p, v_tok, dim = vision_rgb.shape[:6]\n frame_embs = repeat(self.frame_embs, 'F d -> b F T p v d', b=bs, T=T, p=p, v=v_tok)\n vision_rgb = vision_rgb + frame_embs\n vision_rgb = rearrange(vision_rgb, 'b F T p v d -> (b F) T p v d')\n vision_rgb = self.perceiver(vision_rgb)\n\n vision_gripper = vision_gripper.view(vision_gripper.shape[0] // self.window_size, self.window_size,\n *vision_gripper.shape[1:])\n frame_embs = repeat(self.frame_embs, 'F d -> b F T p v d', b=bs, T=T, p=p, v=v_tok)\n vision_gripper = vision_gripper + frame_embs\n vision_gripper = rearrange(vision_gripper, 'b F T p v d -> (b F) T p v d')\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=2) # reshapes to (b, T, 2*n, d)\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=2) # reshapes to (b, T, 2*n+1, d)\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n \n def _encode_history_vision_fc_post(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n bs = int(vision_rgb.shape[0] // self.window_size)\n vision_rgb = self._encode_vision(vision_rgb)\n vision_rgb = self.perceiver(vision_rgb) # BxL, T, n, d\n vision_rgb = vision_rgb.view(-1, self.window_size, *vision_rgb.shape[1:]) # B, L, T, n, d\n vision_rgb = rearrange(vision_rgb, 'b L T n d -> b T (n L) d')\n\n vision_gripper = self._encode_vision(vision_gripper)\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n vision_gripper = vision_gripper.view(-1, self.window_size, *vision_gripper.shape[1:]) # B, L, T, n, d\n vision_gripper = rearrange(vision_gripper, 'b L T n d -> b T (n L) d')\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=2)\n\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=2) # reshapes to (b, T, 2*n+1, d)\n \n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x" } ]
from logging import debug from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig from typing import Optional from robot_flamingo.models.flamingo_bc import BCFlamingo from robot_flamingo.models.flamingo_mpt import MPTFlamingo from open_flamingo.src.flamingo_lm import FlamingoLMMixin from open_flamingo.src.utils import extend_instance from open_flamingo.src.factory import _infer_decoder_layers_attr_name import open_clip
13257
def create_model_and_transforms( clip_vision_encoder_path: str, clip_vision_encoder_pretrained: str, lang_encoder_path: str, tokenizer_path: str, cross_attn_every_n_layers: int = 1, use_local_files: bool = False, decoder_layers_attr_name: str = None, # this is the window size sampled from the episode window_size: int = 32, freeze_embed: bool = False, train_params = -1, use_gripper=False, use_state=False, last_action=False, fusion_mode='', pad_length=-1, debug=False, sep_resampler=False, sep_lm_head=False, unfreeze_vit=False, return_feature=False, multi_step_action=1, llm_name='llama_9b', pooling='max', residual=False, tcp_rel=False, replan=-1, decoder_type='lstm', hidden_size=None, freeze_sampler=False, fwd_pred=False, fwd_pred_hand=False, no_image_patch=False, global_latent=1, refresh=-1, **flamingo_kwargs, ): """ Initialize a Flamingo model from a pretrained vision encoder and language encoder. Appends special tokens to the tokenizer and freezes backbones. Args: clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32") clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k") lang_encoder_path (str): path to pretrained language encoder tokenizer_path (str): path to pretrained tokenizer cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1. use_local_files (bool, optional): whether to use local files. Defaults to False. decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None. Returns: Flamingo: Flamingo model from pretrained vision and language encoders Image processor: Pipeline to preprocess input images Tokenizer: A tokenizer for the language model """ vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained ) # set the vision encoder to output the visual features vision_encoder.visual.output_tokens = True text_tokenizer = AutoTokenizer.from_pretrained( tokenizer_path, local_files_only=use_local_files ) # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) if debug: # Load the local checkpoint into a model instance. lang_encoder = AutoModelForCausalLM.from_pretrained(lang_encoder_path, ignore_keys=["config"], trust_remote_code=True) # Set the `init_weights` parameter to `False` to prevent the model from loading the pretrained weights. 
lang_encoder.init_weights(False) else: print(lang_encoder_path) lang_encoder = AutoModelForCausalLM.from_pretrained( lang_encoder_path, local_files_only=use_local_files, trust_remote_code=True ) # print(lang_encoder_path) # if llm_name == 'llama': # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # else: # # name = 'mosaicml/mpt-7b' # config = { # "model_type": "auto", # "add_lm_head": True, # } # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # hacks for MPT-1B, which doesn't have a get_input_embeddings method if "mpt-1b-redpajama-200b" in lang_encoder_path: class EmbeddingFnMixin: def get_input_embeddings(self): return self.transformer.wte def set_input_embeddings(self, new_embeddings): self.transformer.wte = new_embeddings extend_instance(lang_encoder, EmbeddingFnMixin) extend_instance(lang_encoder, FlamingoLMMixin) if decoder_layers_attr_name is None: decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder) lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name) # print(lang_encoder.base_model_prefix) # print(getattr(lang_encoder, lang_encoder.base_model_prefix, lang_encoder)) # print(lang_encoder) lang_encoder.resize_token_embeddings(len(text_tokenizer)) if 'llama' in llm_name:
mpt_dict = { "mpt_3b": { "lang_encoder_path": "path_to/mpt-1b-redpajama-200b", "tokenizer_path": "path_to/mpt-1b-redpajama-200b", "cross_attn_every_n_layers": 1, "openflamingo_checkpoint": "path_to/OpenFlamingo-3B-vitl-mpt1b/checkpoint.pt" }, "mpt_dolly_3b": { "lang_encoder_path": "path_to/mpt-1b-redpajama-200b-dolly", "tokenizer_path": "path_to/mpt-1b-redpajama-200b-dolly", "cross_attn_every_n_layers": 1, "openflamingo_checkpoint": "path_to/OpenFlamingo-3B-vitl-mpt1b-langinstruct/checkpoint.pt" }, "mpt_4b": { "lang_encoder_path": "path_to/RedPajama-INCITE-Instruct-3B-v1", "tokenizer_path": "path_to/RedPajama-INCITE-Instruct-3B-v1", "cross_attn_every_n_layers": 2, "openflamingo_checkpoint": "path_to/OpenFlamingo-4B-vitl-rpj3b-langinstruct/checkpoint.pt" }, "mpt_base_4b": { "lang_encoder_path": "path_to/RedPajama-INCITE-Base-3B-v1", "tokenizer_path": "path_to/RedPajama-INCITE-Base-3B-v1", "cross_attn_every_n_layers": 2, "openflamingo_checkpoint": "path_to/OpenFlamingo-4B-vitl-rpj3b/checkpoint.pt" }, "mpt_9b": { "lang_encoder_path": "path_to/mpt-7b", "tokenizer_path": "path_to/mpt-7b", "cross_attn_every_n_layers": 4, "openflamingo_checkpoint": "path_to/OpenFlamingo-9B-vitl-mpt7b/checkpoint.pt" }, "llama_9b": { "lang_encoder_path": "path_to/llama-7b-hf-jxu124", "tokenizer_path": "path_to/llama-7b-hf-jxu124", "cross_attn_every_n_layers": 4, "openflamingo_checkpoint": "path_to/OpenFlamingo-9B/checkpoint.pt" } } def get_transforms( clip_vision_encoder_path: str = "ViT-L-14", clip_vision_encoder_pretrained: str = "openai", tokenizer_path: str = "path_to/llama-7b-hf-jxu124", use_local_files: bool = False, ): vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained ) text_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) return image_processor, text_tokenizer def create_model_and_transforms( clip_vision_encoder_path: str, clip_vision_encoder_pretrained: str, lang_encoder_path: str, tokenizer_path: str, cross_attn_every_n_layers: int = 1, use_local_files: bool = False, decoder_layers_attr_name: str = None, # this is the window size sampled from the episode window_size: int = 32, freeze_embed: bool = False, train_params = -1, use_gripper=False, use_state=False, last_action=False, fusion_mode='', pad_length=-1, debug=False, sep_resampler=False, sep_lm_head=False, unfreeze_vit=False, return_feature=False, multi_step_action=1, llm_name='llama_9b', pooling='max', residual=False, tcp_rel=False, replan=-1, decoder_type='lstm', hidden_size=None, freeze_sampler=False, fwd_pred=False, fwd_pred_hand=False, no_image_patch=False, global_latent=1, refresh=-1, **flamingo_kwargs, ): """ Initialize a Flamingo model from a pretrained vision encoder and language encoder. Appends special tokens to the tokenizer and freezes backbones. Args: clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32") clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. 
"laion2b_s32b_b79k") lang_encoder_path (str): path to pretrained language encoder tokenizer_path (str): path to pretrained tokenizer cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1. use_local_files (bool, optional): whether to use local files. Defaults to False. decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None. Returns: Flamingo: Flamingo model from pretrained vision and language encoders Image processor: Pipeline to preprocess input images Tokenizer: A tokenizer for the language model """ vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained ) # set the vision encoder to output the visual features vision_encoder.visual.output_tokens = True text_tokenizer = AutoTokenizer.from_pretrained( tokenizer_path, local_files_only=use_local_files ) # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) if debug: # Load the local checkpoint into a model instance. lang_encoder = AutoModelForCausalLM.from_pretrained(lang_encoder_path, ignore_keys=["config"], trust_remote_code=True) # Set the `init_weights` parameter to `False` to prevent the model from loading the pretrained weights. lang_encoder.init_weights(False) else: print(lang_encoder_path) lang_encoder = AutoModelForCausalLM.from_pretrained( lang_encoder_path, local_files_only=use_local_files, trust_remote_code=True ) # print(lang_encoder_path) # if llm_name == 'llama': # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # else: # # name = 'mosaicml/mpt-7b' # config = { # "model_type": "auto", # "add_lm_head": True, # } # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # hacks for MPT-1B, which doesn't have a get_input_embeddings method if "mpt-1b-redpajama-200b" in lang_encoder_path: class EmbeddingFnMixin: def get_input_embeddings(self): return self.transformer.wte def set_input_embeddings(self, new_embeddings): self.transformer.wte = new_embeddings extend_instance(lang_encoder, EmbeddingFnMixin) extend_instance(lang_encoder, FlamingoLMMixin) if decoder_layers_attr_name is None: decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder) lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name) # print(lang_encoder.base_model_prefix) # print(getattr(lang_encoder, lang_encoder.base_model_prefix, lang_encoder)) # print(lang_encoder) lang_encoder.resize_token_embeddings(len(text_tokenizer)) if 'llama' in llm_name:
Model_fn = BCFlamingo
0
2023-11-02 01:36:23+00:00
16k
radekd91/inferno
inferno/datasets/AfewVaDataModule.py
[ { "identifier": "load_segmentation", "path": "inferno/datasets/IO.py", "snippet": "def load_segmentation(filename):\n with open(filename, \"rb\") as f:\n seg = cpkl.load(f, compression='gzip')\n seg_type = seg[0]\n seg_image = seg[1]\n # seg_type = pkl.load(f)\n # seg_image = pkl.load(f)\n return seg_image, seg_type" }, { "identifier": "process_segmentation", "path": "inferno/datasets/IO.py", "snippet": "def process_segmentation(segmentation, seg_type, discarded_labels=None):\n if seg_type == \"face_parsing\":\n discarded_labels = discarded_labels or default_discarded_labels\n # start = timer()\n # segmentation_proc = np.ones_like(segmentation, dtype=np.float32)\n # for label in discarded_labels:\n # segmentation_proc[segmentation == label] = 0.\n segmentation_proc = np.isin(segmentation, discarded_labels)\n segmentation_proc = np.logical_not(segmentation_proc)\n segmentation_proc = segmentation_proc.astype(np.float32)\n # end = timer()\n # print(f\"Segmentation label discarding took {end - start}s\")\n return segmentation_proc\n elif seg_type == \"face_segmentation_focus\":\n segmentation = segmentation > 0.5 \n segmentation = segmentation.astype(np.float32)\n return segmentation\n else:\n raise ValueError(f\"Invalid segmentation type '{seg_type}'\")" }, { "identifier": "load_emotion", "path": "inferno/datasets/IO.py", "snippet": "def load_emotion(filename):\n with open(filename, \"rb\") as f:\n emo = cpkl.load(f, compression='gzip')\n version = emo[0]\n emotion_type = emo[1]\n emotion_features = emo[2]\n return emotion_features, emotion_type" }, { "identifier": "save_emotion", "path": "inferno/datasets/IO.py", "snippet": "def save_emotion(filename, emotion_features, emotion_type, version=0):\n with open(filename, \"wb\") as f:\n # for some reason compressed pickle can only load one object (EOF bug)\n # so put it in the list\n cpkl.dump([version, emotion_type, emotion_features], f, compression='gzip')" }, { "identifier": "numpy_image_to_torch", "path": "inferno/utils/image.py", "snippet": "def numpy_image_to_torch(img : np.ndarray) -> torch.Tensor:\n img = img.transpose([2, 0, 1])\n return torch.from_numpy(img)" }, { "identifier": "KeypointNormalization", "path": "inferno/transforms/keypoints.py", "snippet": "class KeypointNormalization(KeypointTransform):\n\n def __init__(self, scale_x=1., scale_y=1.):\n super().__init__(scale_x, scale_y)\n\n def forward(self, points):\n # normalization the way EMOCA uses it.\n # the keypoints are not used in image space but in normalized space\n # for loss computation\n # the normalization is as follows:\n if isinstance(points, torch.Tensor):\n points_ = points.clone()\n elif isinstance(points, np.ndarray):\n points_ = points.copy()\n else:\n raise ValueError(f\"Invalid type of points {str(type(points))}\")\n points_[..., 0] -= self.scale_x/2\n points_[..., 0] /= self.scale_x/2\n points_[..., 1] -= self.scale_y/2\n points_[..., 1] /= self.scale_y/2\n return points_\n\n def inv(self, points):\n if isinstance(points, torch.Tensor):\n points_ = points.clone()\n elif isinstance(points, np.ndarray):\n points_ = points.copy()\n else:\n raise ValueError(f\"Invalid type of points {str(type(points))}\")\n points_[..., 0] *= self.scale_x / 2\n points_[..., 0] += self.scale_x / 2\n points_[..., 1] *= self.scale_y / 2\n points_[..., 1] += self.scale_y / 2\n return points_" }, { "identifier": "FaceDataModuleBase", "path": "inferno/datasets/FaceDataModuleBase.py", "snippet": "class FaceDataModuleBase(pl.LightningDataModule):\n \"\"\"\n A base data module for face 
datasets. This DM can be inherited by any face datasets, which just adapt things \n to the dataset's specificities (such as different GT or data storage structure). \n This class can take care of face detection, recognition, segmentation and landmark detection.\n \"\"\"\n\n def __init__(self, root_dir, output_dir, processed_subfolder, device=None,\n face_detector='fan',\n face_detector_threshold=0.9,\n image_size=224,\n scale=1.25,\n bb_center_shift_x=0., # in relative numbers\n bb_center_shift_y=0., # in relative numbers (i.e. -0.1 for 10% shift upwards, ...)\n processed_ext=\".png\", \n save_detection_images=True, \n save_landmarks_frame_by_frame=True, # default\n save_landmarks_one_file=False, # only use for large scale video datasets (that would produce too many files otherwise)\n save_segmentation_frame_by_frame=True, # default\n save_segmentation_one_file=False, # only use for large scale video datasets (that would produce too many files otherwise)\n return_mica_images=False,\n ):\n super().__init__()\n self.root_dir = root_dir\n self.output_dir = output_dir\n self.bb_center_shift_x = bb_center_shift_x\n self.bb_center_shift_y = bb_center_shift_y\n self.processed_ext = processed_ext\n self.save_detection_images=save_detection_images\n self.save_landmarks_frame_by_frame = save_landmarks_frame_by_frame\n self.save_landmarks_one_file = save_landmarks_one_file\n assert not (save_landmarks_one_file and save_landmarks_frame_by_frame) # only one of them can be true\n self.save_segmentation_frame_by_frame = save_segmentation_frame_by_frame\n self.save_segmentation_one_file = save_segmentation_one_file\n assert not (save_segmentation_one_file and save_segmentation_frame_by_frame) # only one of them can be true\n\n if processed_subfolder is None:\n import datetime\n date = datetime.datetime.now()\n processed_folder = os.path.join(output_dir, \"processed_%s\" % date.strftime(\"%Y_%b_%d_%H-%M-%S\"))\n else:\n processed_folder = os.path.join(output_dir, processed_subfolder)\n self.output_dir = processed_folder\n\n self.device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n self.face_detector_type = face_detector\n self.face_detector_threshold = face_detector_threshold\n\n self.image_size = image_size\n self.scale = scale\n self.return_mica_images = return_mica_images\n\n def _get_max_faces_per_image(self): \n return 1\n \n def _is_video_dataset(self): \n return False\n\n # @profile\n def _instantiate_detector(self, overwrite = False, face_detector=None):\n face_detector = face_detector or self.face_detector_type\n if hasattr(self, 'face_detector'):\n if not overwrite:\n return\n del self.face_detector\n if self.face_detector_type == 'fan':\n self.face_detector = FAN(self.device, threshold=self.face_detector_threshold, mode='2D')\n elif self.face_detector_type == 'fan3d':\n self.face_detector = FAN(self.device, threshold=self.face_detector_threshold, mode='3D')\n elif self.face_detector_type == 'mtcnn':\n self.face_detector = MTCNN(self.device)\n elif self.face_detector_type == '3fabrec': \n from inferno.utils.TFabRecLandmarkDetector import TFabRec\n self.face_detector = TFabRec(instantiate_detector='sfd', threshold=self.face_detector_threshold)\n elif self.face_detector_type == 'mediapipe': \n from inferno.utils.MediaPipeLandmarkDetector import MediaPipeLandmarkDetector\n self.face_detector = MediaPipeLandmarkDetector(threshold=self.face_detector_threshold, \n video_based=self._is_video_dataset(), max_faces=self._get_max_faces_per_image())\n elif 
self.face_detector_type == 'deep3dface': \n from inferno.utils.Deep3DFaceLandmarkDetector import Deep3DFaceLandmarkDetector\n self.face_detector = Deep3DFaceLandmarkDetector(instantiate_detector='mtcnn')\n else:\n raise ValueError(\"Invalid face detector specifier '%s'\" % self.face_detector)\n\n # @profile\n def _detect_faces_in_image(self, image_or_path, detected_faces=None):\n # imagepath = self.imagepath_list[index]\n # imagename = imagepath.split('/')[-1].split('.')[0]\n if isinstance(image_or_path, (str, Path)):\n image = np.array(imread(image_or_path))\n elif isinstance(image_or_path, np.ndarray):\n image = image_or_path\n else: \n raise ValueError(\"Invalid image type '%s'\" % type(image_or_path)) \n \n if len(image.shape) == 2:\n image = np.tile(image[:, :, None], (1, 1, 3))\n if len(image.shape) == 3 and image.shape[2] > 3:\n image = image[:, :, :3]\n\n h, w, _ = image.shape\n self._instantiate_detector()\n bounding_boxes, bbox_type, landmarks = self.face_detector.run(image,\n with_landmarks=True,\n detected_faces=detected_faces)\n image = image / 255.\n detection_images = []\n detection_centers = []\n detection_sizes = []\n detection_landmarks = [] # landmarks wrt the detection image\n # original_landmarks = [] # landmarks wrt the original image\n original_landmarks = landmarks # landmarks wrt the original image\n # detection_embeddings = []\n if len(bounding_boxes) == 0:\n # print('no face detected! run original image')\n return detection_images, detection_centers, detection_images, \\\n bbox_type, detection_landmarks, original_landmarks\n # left = 0\n # right = h - 1\n # top = 0\n # bottom = w - 1\n # bounding_boxes += [[left, right, top, bottom]]\n\n for bi, bbox in enumerate(bounding_boxes):\n left = bbox[0]\n right = bbox[2]\n top = bbox[1]\n bottom = bbox[3]\n old_size, center = bbox2point(left, right, top, bottom, type=bbox_type)\n\n center[0] += abs(right-left)*self.bb_center_shift_x\n center[1] += abs(bottom-top)*self.bb_center_shift_y\n\n size = int(old_size * self.scale)\n\n dst_image, dts_landmark = bbpoint_warp(image, center, size, self.image_size, landmarks=landmarks[bi])\n\n # dst_image = dst_image.transpose(2, 0, 1)\n #\n detection_images += [(dst_image*255).astype(np.uint8)]\n detection_centers += [center]\n detection_sizes += [size]\n\n # imsave(os.path.join(\"detection_%d.png\" % bi), dst_image)\n\n # to be checked\n detection_landmarks += [dts_landmark]\n\n del image\n return detection_images, detection_centers, detection_sizes, bbox_type, detection_landmarks, original_landmarks\n\n # @profile\n def _detect_faces_in_image_wrapper(self, frame_list, fid, out_detection_folder, out_landmark_folder, bb_outfile,\n centers_all, sizes_all, detection_fnames_all, landmark_fnames_all, \n out_landmarks_all=None, out_landmarks_orig_all=None, out_bbox_type_all=None):\n\n if isinstance(frame_list, (str, Path, list)):\\\n # if frame list is a list of image paths\n frame_fname = frame_list[fid]\n # detect faces in each frames\n detection_ims, centers, sizes, bbox_type, landmarks, orig_landmarks = self._detect_faces_in_image(Path(self.output_dir) / frame_fname)\n elif isinstance(frame_list, (np.ndarray, types.GeneratorType)): \n # frame_list is an array of many images, or a generator (like a video reader)\n frame_fname =Path(f\"{fid:05d}.png\")\n if isinstance(frame_list, np.ndarray):\n frame = frame_list[fid]\n else: \n frame = next(frame_list)\n detection_ims, centers, sizes, bbox_type, landmarks, orig_landmarks = self._detect_faces_in_image(frame)\n # if len(detection_ims) 
> 0: # debug visualization\n # imsave(frame_fname, detection_ims[0])\n \n # self.detection_lists[sequence_id][fid] += [detections]\n # import plotly.graph_objects as go\n # fig = go.Figure(data=go.Image(z=frame,))\n # fig.show()\n\n \n centers_all += [centers]\n sizes_all += [sizes]\n if out_landmarks_all is not None:\n out_landmarks_all += [landmarks]\n if out_landmarks_orig_all is not None:\n out_landmarks_orig_all += [orig_landmarks]\n if out_bbox_type_all is not None:\n out_bbox_type_all += [[bbox_type]*len(landmarks)]\n\n # save detections\n detection_fnames = []\n landmark_fnames = []\n for di, detection in enumerate(detection_ims):\n # save detection\n stem = frame_fname.stem + \"_%.03d\" % di\n if self.save_detection_images:\n out_detection_fname = out_detection_folder / (stem + self.processed_ext)\n detection_fnames += [out_detection_fname.relative_to(self.output_dir)]\n if self.processed_ext in ['.JPG', '.jpg', \".jpeg\", \".JPEG\"]:\n imsave(out_detection_fname, detection, quality=100)\n else:\n imsave(out_detection_fname, detection)\n # save landmarks\n if self.save_landmarks_frame_by_frame:\n if self.save_detection_images:\n out_landmark_fname = out_landmark_folder / (stem + \".pkl\")\n landmark_fnames += [out_landmark_fname.relative_to(self.output_dir)]\n save_landmark(out_landmark_fname, landmarks[di], bbox_type)\n else: \n out_landmark_fname = out_landmark_folder / (stem + \".pkl\")\n landmark_fnames += [out_landmark_fname.relative_to(self.output_dir)]\n save_landmark(out_landmark_fname, orig_landmarks[di], bbox_type)\n\n detection_fnames_all += [detection_fnames]\n landmark_fnames_all += [landmark_fnames]\n\n torch.cuda.empty_cache()\n checkpoint_frequency = 100\n if fid % checkpoint_frequency == 0:\n FaceDataModuleBase.save_detections(bb_outfile, detection_fnames_all, landmark_fnames_all,\n centers_all, sizes_all, fid)\n\n\n def _get_segmentation_method(self): \n return \"focus\"\n # return \"bisenet\"\n\n\n def _segment_images(self, detection_fnames_or_ims, out_segmentation_folder, path_depth = 0, landmarks=None, segmentation_net=None):\n import time\n # segmentation_net = segmentation_net or \"bisenet\"\n segmentation_net = segmentation_net or self._get_segmentation_method()\n if self.save_landmarks_one_file: \n overwrite = False \n # single_out_file = out_segmentation_folder / \"segmentations.pkl\"\n single_out_file = out_segmentation_folder / \"segmentations.hdf5\"\n if single_out_file.is_file() and not overwrite:\n print(f\"Segmentation already found in {single_out_file}, skipping\")\n return\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print(device)\n net, seg_type, batch_size = self._get_segmentation_net(device, segmentation_net)\n\n # if self.save_detection_images:\n # ref_im = imread(detection_fnames_or_ims[0])\n # else: \n # ref_im = detection_fnames_or_ims[0]\n # ref_size = Resize((ref_im.shape[0], ref_im.shape[1]), interpolation=Image.NEAREST)\n ref_size = None\n\n # transforms = Compose([\n # Resize((512, 512)),\n # Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n # ])\n transforms=None\n # batch_size = 16\n\n if isinstance(detection_fnames_or_ims, types.GeneratorType): \n im_read = \"skvreader\"\n elif isinstance(detection_fnames_or_ims, (FFmpegReader)):\n im_read = \"skvffmpeg\"\n else:\n im_read = 'pil' if not isinstance(detection_fnames_or_ims[0], np.ndarray) else None\n\n dataset = UnsupervisedImageDataset(detection_fnames_or_ims, image_transforms=transforms,\n landmark_list = landmarks,\n im_read=im_read)\n 
loader = DataLoader(dataset, batch_size=batch_size, num_workers=4 if im_read not in [\"skvreader\", \"skvffmpeg\"] else 1, \n shuffle=False)\n\n # import matplotlib.pyplot as plt\n\n if self.save_segmentation_one_file: \n out_segmentation_names = []\n out_segmentations = []\n out_segmentation_types = []\n\n for i, batch in enumerate(tqdm(loader)):\n # facenet_pytorch expects this stanadrization for the input to the net\n # images = fixed_image_standardization(batch['image'].to(device))\n images = batch['image'].cuda()\n # start = time.time()\n with torch.no_grad():\n segmentation = net(images)\n # end = time.time()\n\n if ref_size is None:\n ref_size = Resize((images.shape[2], images.shape[3]), interpolation=Image.NEAREST)\n\n segmentation = ref_size(segmentation)\n segmentation = segmentation.cpu().numpy()\n\n if self.save_segmentation_frame_by_frame:\n start = time.time()\n for j in range(segmentation.shape[0]):\n image_path = batch['path'][j]\n # if isinstance(out_segmentation_folder, list):\n if path_depth > 0:\n rel_path = Path(image_path).parent.relative_to(Path(image_path).parents[path_depth])\n segmentation_path = out_segmentation_folder / rel_path / (Path(image_path).stem + \".pkl\")\n else:\n segmentation_path = out_segmentation_folder / (Path(image_path).stem + \".pkl\")\n segmentation_path.parent.mkdir(exist_ok=True, parents=True)\n # im = images[j]\n # im = im.permute(1,2,0).cpu().numpy()\n # from inferno.datasets.IO import process_segmentation \n # import matplotlib.pyplot as plt\n # from inferno.datasets.FaceVideoDataModule import FaceDataModuleBase\n # seg = process_segmentation(segmentation[j], seg_type)\n # imsave(\"seg.png\", seg)\n # imsave(\"im.png\", im)\n # FaceDataModuleBase.vis_parsing_maps(im, segmentation[j], stride=1, save_im=True,\n # save_path='overlay.png')\n # plt.figure()\n # plt.imshow(im)\n # plt.show()\n # plt.figure()\n # plt.imshow(seg[0])\n # plt.show()\n save_segmentation(segmentation_path, segmentation[j], seg_type)\n print(f\" Saving batch {i} took: {end - start}\")\n end = time.time()\n if self.save_segmentation_one_file: \n segmentation_names = []\n segmentations = []\n for j in range(segmentation.shape[0]):\n image_path = batch['path'][j]\n if path_depth > 0:\n rel_path = Path(image_path).parent.relative_to(Path(image_path).parents[path_depth])\n segmentation_path = rel_path / (Path(image_path).stem + \".pkl\")\n else:\n segmentation_path = Path(image_path).stem \n segmentation_names += [segmentation_path]\n segmentations += [segmentation[j]]\n out_segmentation_names += segmentation_names\n out_segmentations += segmentations\n out_segmentation_types += [seg_type] * len(segmentation_names)\n\n if self.save_landmarks_one_file: \n if single_out_file.suffix == \".pkl\":\n save_segmentation_list(single_out_file, out_segmentations, out_segmentation_types, out_segmentation_names)\n elif single_out_file.suffix == \".hdf5\":\n save_segmentation_list_v2(single_out_file, out_segmentations, out_segmentation_types, out_segmentation_names)\n print(\"Segmentation saved to %s\" % single_out_file)\n\n\n def _get_segmentation_net(self, device, method='bisenet'):\n if method == 'bisenet':\n seg_type = 'face_parsing'\n if hasattr(self, \"_bisenet\" ): \n net = self._bisenet\n else:\n from inferno.models.external.BiSeNetFaceParsing import BiSeNetFaceParsing\n net = BiSeNetFaceParsing()\n self._bisenet = net\n batch_size = 64\n elif method == \"gpen\": \n seg_type = 'face_parsing_gpen'\n if hasattr(self, \"_gpen\" ): \n net = self._gpen\n else:\n from 
inferno.models.external.GPENFaceParsing import GPENFaceParsing\n net = GPENFaceParsing()\n self._gpen = net\n batch_size = 16\n elif method == \"focus\": \n seg_type = 'face_segmentation_focus'\n if hasattr(self, \"_focus\" ): \n net = self._focus\n else:\n from inferno.models.external.FocusSegmentation import FocusSegmentation\n net = FocusSegmentation()\n self._focus = net\n batch_size = 16\n # batch_size = 16\n else: \n raise ValueError(f\"Unknown segmentation type: {method}\" )\n\n # from inferno.utils.other import get_path_to_externals\n # path_to_segnet = get_path_to_externals() / \"face-parsing.PyTorch\"\n # if not(str(path_to_segnet) in sys.path or str(path_to_segnet.absolute()) in sys.path):\n # sys.path += [str(path_to_segnet)]\n\n # from model import BiSeNet\n # n_classes = 19\n # net = BiSeNet(n_classes=n_classes)\n # # net.cuda()\n # save_pth = path_to_segnet / 'res' / 'cp' / '79999_iter.pth'\n # net.load_state_dict(torch.load(save_pth))\n # # net.eval()\n # net.eval().to(device)\n\n # labels = {\n # 0: 'background',\n # 1: 'skin',\n # 2: 'nose',\n # 3: 'eye_g',\n # 4: 'l_eye',\n # 5: 'r_eye',\n # 6: 'l_brow',\n # 7: 'r_brow',\n # 8: 'l_ear',\n # 9: 'r_ear',\n # 10: 'mouth',\n # 11: 'u_lip',\n # 12: 'l_lip',\n # 13: 'hair',\n # 14: 'hat',\n # 15: 'ear_r',\n # 16: 'neck_l',\n # 17: 'neck',\n # 18: 'cloth'\n # }\n\n return net, seg_type , batch_size\n\n\n @staticmethod\n def save_landmark_list(fname, landmarks):\n with open(fname, \"wb\" ) as f:\n pkl.dump(landmarks, f)\n\n @staticmethod\n def load_landmark_list(fname):\n with open(fname, \"rb\" ) as f:\n landmarks = pkl.load(f)\n return landmarks\n\n\n @staticmethod\n def save_landmark_list_v2(fname, landmarks, landmark_confidences, landmark_types):\n with open(fname, \"wb\" ) as f:\n pkl.dump(landmarks, f)\n pkl.dump(landmark_confidences, f)\n pkl.dump(landmark_types, f)\n\n @staticmethod\n def load_landmark_list_v2(fname):\n with open(fname, \"rb\" ) as f:\n landmarks = pkl.load(f)\n landmark_confidences = pkl.load(f)\n landmark_types = pkl.load(f)\n return landmarks, landmark_confidences, landmark_types\n\n\n @staticmethod\n def save_detections(fname, detection_fnames, landmark_fnames, centers, sizes, last_frame_id):\n with open(fname, \"wb\" ) as f:\n pkl.dump(detection_fnames, f)\n pkl.dump(centers, f)\n pkl.dump(sizes, f)\n pkl.dump(last_frame_id, f)\n pkl.dump(landmark_fnames, f)\n\n @staticmethod\n def load_detections(fname):\n with open(fname, \"rb\" ) as f:\n detection_fnames = pkl.load(f)\n centers = pkl.load(f)\n sizes = pkl.load(f)\n try:\n last_frame_id = pkl.load(f)\n except:\n last_frame_id = -1\n try:\n landmark_fnames = pkl.load(f)\n except:\n landmark_fnames = [None]*len(detection_fnames)\n\n return detection_fnames, landmark_fnames, centers, sizes, last_frame_id" }, { "identifier": "bbox2point", "path": "inferno/datasets/ImageDatasetHelpers.py", "snippet": "def bbox2point(left, right, top, bottom, type='bbox'):\n ''' bbox from detector and landmarks are different\n '''\n if type == 'kpt68':\n old_size = (right - left + bottom - top) / 2 * 1.1\n center_x = right - (right - left) / 2.0\n center_y = bottom - (bottom - top) / 2.0\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])\n elif type == 'bbox':\n old_size = (right - left + bottom - top) / 2\n center_x = right - (right - left) / 2.0 \n center_y = bottom - (bottom - top) / 2.0 + old_size * 0.12\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size * 0.12])\n elif type == 
\"mediapipe\":\n old_size = (right - left + bottom - top) / 2 * 1.1\n center_x = right - (right - left) / 2.0 \n center_y = bottom - (bottom - top) / 2.0\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])\n else:\n raise NotImplementedError(f\" bbox2point not implemented for {type} \")\n if isinstance(center_x, np.ndarray):\n center = np.stack([center_x, center_y], axis=1)\n else: \n center = np.array([center_x, center_y])\n return old_size, center" }, { "identifier": "bbpoint_warp", "path": "inferno/datasets/ImageDatasetHelpers.py", "snippet": "def bbpoint_warp(image, center, size, target_size_height, target_size_width=None, output_shape=None, inv=True, landmarks=None, \n order=3 # order of interpolation, bicubic by default\n ):\n target_size_width = target_size_width or target_size_height\n tform = point2transform(center, size, target_size_height, target_size_width)\n tf = tform.inverse if inv else tform\n output_shape = output_shape or (target_size_height, target_size_width)\n dst_image = warp(image, tf, output_shape=output_shape, order=order)\n if landmarks is None:\n return dst_image\n # points need the matrix\n if isinstance(landmarks, np.ndarray):\n assert isinstance(landmarks, np.ndarray)\n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = tf_lmk(landmarks[:, :2])\n elif isinstance(landmarks, list): \n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = [] \n for i in range(len(landmarks)):\n dst_landmarks += [tf_lmk(landmarks[i][:, :2])]\n elif isinstance(landmarks, dict): \n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = {}\n for key, value in landmarks.items():\n dst_landmarks[key] = tf_lmk(landmarks[key][:, :2])\n else: \n raise ValueError(\"landmarks must be np.ndarray, list or dict\")\n return dst_image, dst_landmarks" }, { "identifier": "EmotionalImageDatasetBase", "path": "inferno/datasets/EmotionalImageDataset.py", "snippet": "class EmotionalImageDatasetBase(torch.utils.data.Dataset):\n\n\n def _augment(self, img, seg_image, landmark, input_img_shape=None):\n\n if self.transforms is not None:\n assert img.dtype == np.uint8\n # img = img.astype(np.float32) # TODO: debug this (do we get valid images when not used?)\n res = self.transforms(image=img,\n segmentation_maps=seg_image,\n keypoints=landmark)\n if seg_image is not None and landmark is not None:\n img, seg_image, landmark = res\n elif seg_image is not None:\n img, seg_image = res\n elif landmark is not None:\n img, _, landmark = res\n else:\n img = res\n\n \n assert img.dtype == np.uint8\n if img.dtype != np.float32:\n img = img.astype(np.float32) / 255.0\n \n assert img.dtype == np.float32\n\n\n if seg_image is not None:\n seg_image = np.squeeze(seg_image)[..., np.newaxis].astype(np.float32)\n\n if landmark is not None:\n landmark = np.squeeze(landmark)\n if isinstance(self.landmark_normalizer, KeypointScale):\n self.landmark_normalizer.set_scale(\n img.shape[0] / input_img_shape[0],\n img.shape[1] / input_img_shape[1])\n elif isinstance(self.landmark_normalizer, KeypointNormalization):\n self.landmark_normalizer.set_scale(img.shape[0], img.shape[1])\n # self.landmark_normalizer.set_scale(input_img_shape[0], input_img_shape[1])\n else:\n raise ValueError(f\"Unsupported landmark normalizer type: {type(self.landmark_normalizer)}\")\n landmark = self.landmark_normalizer(landmark)\n\n return img, seg_image, landmark\n\n\n\n def visualize_sample(self, sample):\n if isinstance(sample, int):\n sample = self[sample]\n\n import matplotlib.pyplot as plt\n 
num_images = 1\n if 'mask' in sample.keys():\n num_images += 1\n\n if 'landmark' in sample.keys():\n num_images += 1\n if 'landmark_mediapipe' in sample.keys():\n num_images += 1\n\n if \"mica_images\" in sample.keys():\n num_images += 1\n\n if len(sample[\"image\"].shape) >= 4:\n K = sample[\"image\"].shape[0]\n fig, axs = plt.subplots(K, num_images)\n else:\n K = None\n fig, axs = plt.subplots(1, num_images)\n\n # if K is not None:\n for k in range(K or 1):\n self._plot(axs, K, k, sample)\n plt.show()\n\n def _plot(self, axs, K, k, sample):\n\n from inferno.utils.DecaUtils import tensor_vis_landmarks\n\n def index_axis(i, k):\n if K==1 or K is None:\n return axs[i]\n return axs[k,i]\n\n im = sample[\"image\"][k, ...] if K is not None else sample[\"image\"]\n im_expanded = im[np.newaxis, ...]\n\n i = 0\n f = index_axis(i, k).imshow(im.numpy().transpose([1, 2, 0]))\n index_axis(i, k).set_xlabel('Input image')\n i += 1\n\n if \"mica_images\" in sample.keys():\n mica_im = sample[\"mica_images\"][k, ...] if K is not None else sample[\"mica_images\"]\n mica_im = mica_im.numpy().transpose([1, 2, 0])\n mica_im = (mica_im + 1) / 2\n index_axis(i, k).imshow(mica_im)\n # add a caption to the axes.\n index_axis(i, k).set_xlabel(\"MICA image\")\n i += 1\n\n if 'landmark' in sample.keys():\n lmk = sample[\"landmark\"][k, ...] if K is not None else sample[\"landmark\"]\n lmk_expanded = lmk[np.newaxis, ...]\n lmk_im = tensor_vis_landmarks(im_expanded,\n self.landmark_normalizer.inv(lmk_expanded),\n isScale=False, rgb2bgr=False, scale_colors=True).numpy()[0] \\\n .transpose([1, 2, 0])\n index_axis(i, k).imshow(lmk_im)\n i += 1\n\n if 'landmark_mediapipe' in sample.keys():\n lmk = sample[\"landmark_mediapipe\"][k, ...] if K is not None else sample[\"landmark_mediapipe\"]\n lmk_expanded = lmk[np.newaxis, ...]\n lmk_im = tensor_vis_landmarks(im_expanded,\n self.landmark_normalizer.inv(lmk_expanded),\n isScale=False, rgb2bgr=False, scale_colors=True).numpy()[0] \\\n .transpose([1, 2, 0])\n index_axis(i, k).imshow(lmk_im)\n i += 1\n\n if 'mask' in sample.keys():\n mask = sample[\"mask\"][k, ...] 
if K is not None else sample[\"mask\"]\n if mask.ndim == 2:\n mask = mask[np.newaxis, ...]\n index_axis(i, k).imshow(mask.numpy().transpose([1, 2, 0]).squeeze(), cmap='gray')\n i += 1\n\n\n if 'path' in sample.keys() and 'label' in sample.keys():\n if K is None:\n print(f\"Path = {sample['path']}\")\n print(f\"Label = {sample['label']}\")\n else:\n print(f\"Path {k} = {sample['path'][k]}\")\n print(f\"Label {k} = {sample['label'][k]}\")" }, { "identifier": "UnsupervisedImageDataset", "path": "inferno/datasets/UnsupervisedImageDataset.py", "snippet": "class UnsupervisedImageDataset(torch.utils.data.Dataset):\n\n def __init__(self, image_list, landmark_list=None, image_transforms=None, im_read=None, \n align_landmarks=False):\n super().__init__()\n self.image_list = image_list\n self.landmark_list = landmark_list\n if landmark_list is not None and len(landmark_list) != len(image_list):\n raise RuntimeError(\"There must be a landmark for every image\")\n self.image_transforms = image_transforms\n self.im_read = im_read or 'skio'\n if self.im_read in ['skvreader', 'skvffmpeg']:\n self.ordered = True\n self.last_index = -1\n else: \n self.ordered = False\n if self.im_read == 'skvffmpeg':\n self.next_frame_it = self.image_list.nextFrame()\n\n if isinstance(self.image_list, np.ndarray): \n self.im_read = None\n\n def __getitem__(self, index):\n if self.ordered: \n if index != self.last_index + 1:\n raise RuntimeError(\"The images must be read in order because of the skvideo reader\")\n self.last_index = index\n # if index < len(self.image_list):\n # x = self.mnist_data[index]\n # raise IndexError(\"Out of bounds\")\n try:\n if isinstance(self.image_list, np.ndarray):\n img = self.image_list[index].transpose([2, 0, 1]).astype(np.float32)\n img_torch = torch.from_numpy(img)\n path = f\"{index:05d}\"\n elif self.im_read == 'skio':\n img = imread(self.image_list[index])\n img = img.transpose([2, 0, 1]).astype(np.float32)\n img_torch = torch.from_numpy(img)\n path = str(self.image_list[index])\n elif self.im_read == 'pil':\n img = Image.open(self.image_list[index])\n img_torch = ToTensor()(img)\n path = str(self.image_list[index])\n # path = f\"{index:05d}\"\n elif self.im_read == 'skvreader':\n img = next(self.image_list)\n img = img.transpose([2, 0, 1]).astype(np.float32)\n img_torch = torch.from_numpy(img) / 255.\n path = f\"{index:05d}\"\n elif self.im_read == 'skvffmpeg':\n img = next(self.next_frame_it)\n img = img.transpose([2, 0, 1]).astype(np.float32)\n img_torch = torch.from_numpy(img) / 255.\n path = f\"{index:05d}\"\n else:\n raise ValueError(f\"Invalid image reading method {self.im_read}\")\n except Exception as e:\n print(f\"Failed to read '{self.image_list[index]}'. File is probably corrupted. 
Rerun data processing\")\n raise e\n\n if self.image_transforms is not None:\n img_torch = self.image_transforms(img_torch)\n\n batch = {\"image\" : img_torch,\n \"path\" : path}\n\n if self.landmark_list is not None:\n landmark_type, landmark = load_landmark(self.landmark_list[index])\n landmark_torch = torch.from_numpy(landmark)\n\n if self.image_transforms is not None:\n landmark_torch = self.image_transforms(landmark_torch)\n\n batch[\"landmark\"] = landmark_torch\n\n return batch\n\n def __len__(self):\n if self.im_read in ['skvreader', 'skvffmpeg']:\n return self.image_list.getShape()[0]\n return len(self.image_list)" }, { "identifier": "save_landmark", "path": "inferno/utils/FaceDetector.py", "snippet": "def save_landmark(fname, landmark, landmark_type):\n with open(fname, \"wb\") as f:\n pkl.dump(landmark_type, f)\n pkl.dump(landmark, f)" }, { "identifier": "load_landmark", "path": "inferno/utils/FaceDetector.py", "snippet": "def load_landmark(fname):\n with open(fname, \"rb\") as f:\n landmark_type = pkl.load(f)\n landmark = pkl.load(f)\n return landmark_type, landmark" }, { "identifier": "create_image_augmenter", "path": "inferno/transforms/imgaug.py", "snippet": "def create_image_augmenter(im_size, augmentation=None) -> imgaug.augmenters.Augmenter:\n # augmenter_list = [imgaug.augmenters.Resize(im_size)]\n augmenter_list = []\n if augmentation is not None:\n augmenter_list += [augmenter_from_dict(augmentation)]\n augmenter_list += [imgaug.augmenters.Resize(im_size)]\n augmenter = imgaug.augmenters.Sequential(augmenter_list)\n return augmenter" }, { "identifier": "class_from_str", "path": "inferno/utils/other.py", "snippet": "def class_from_str(str, module=None, none_on_fail = False) -> type:\n if module is None:\n module = sys.modules[__name__]\n if hasattr(module, str):\n cl = getattr(module, str)\n return cl\n elif str.lower() == 'none' or none_on_fail:\n return None\n raise RuntimeError(f\"Class '{str}' not found.\")" } ]
import json import os, sys import numpy as np import scipy as sp import torch import pytorch_lightning as pl import pandas as pd import pickle as pkl import imgaug import traceback import json import bisect import warnings import yaml from enum import Enum from pathlib import Path from skimage.io import imread, imsave from skimage.transform import resize, rescale from inferno.datasets.IO import load_segmentation, process_segmentation, load_emotion, save_emotion from inferno.utils.image import numpy_image_to_torch from inferno.transforms.keypoints import KeypointNormalization from inferno.datasets.FaceDataModuleBase import FaceDataModuleBase from inferno.datasets.ImageDatasetHelpers import bbox2point, bbpoint_warp from inferno.datasets.EmotionalImageDataset import EmotionalImageDatasetBase from inferno.datasets.UnsupervisedImageDataset import UnsupervisedImageDataset from inferno.utils.FaceDetector import save_landmark, load_landmark from tqdm import auto from torch.utils.data.dataloader import DataLoader from inferno.transforms.imgaug import create_image_augmenter from torchvision.transforms import Resize, Compose from sklearn.neighbors import NearestNeighbors from torch.utils.data._utils.collate import default_collate from torch.utils.data.sampler import WeightedRandomSampler from collections import OrderedDict from munch import Munch from inferno.utils.other import class_from_str from omegaconf import OmegaConf, DictConfig from inferno.layers.losses.EmonetLoader import get_emonet
12800
self.v_sample_weights = v_weights bin_1d = np.arange(-1.,1., sampling_rate) stat, x_ed, va_binnumber = sp.stats.binned_statistic( va[:, 1], None, 'count', bin_1d) a_weights = 1 / va_binnumber a_weights /= np.linalg.norm(a_weights) a_weights *= np.linalg.norm(np.ones_like(a_weights)) self.a_sample_weights = a_weights def __len__(self): # return 100 return self.size def _load_image(self, index): vid_index = bisect.bisect_right(self.video_sizes_cumulative, index) - 1 vid_first_im_index = self.video_sizes_cumulative[vid_index] vid_item = list(self.sample_list.items())[vid_index] vid_name = vid_item[0] vid_gt = vid_item[1] assert vid_gt.video_id == vid_name vid_frame_list = sorted(list(vid_gt['frames'].keys())) selected_frame = vid_frame_list[index - vid_first_im_index] im_rel_path = Path(vid_name) / (selected_frame + self.ext) im_file = Path(self.image_path) / im_rel_path im_file = im_file.parent / (im_file.stem + self.ext) input_img = imread(im_file) # # scale_factor_x = 1.48 # scale_factor_x = 1.25 # # scale_factor_x = 1 # input_img = resize(input_img, (432, 720, 1)) input_img = resize(input_img, (576, 960)) scale_factor_x = 720 / 960 valence = vid_gt['frames'][selected_frame]['valence'] arousal = vid_gt['frames'][selected_frame]['arousal'] facial_landmarks = np.array(vid_gt['frames'][selected_frame]['landmarks']) facial_landmarks[:,0] /= scale_factor_x if self.normalize_va: valence /= 10. arousal /= 10. return input_img, facial_landmarks, valence, arousal, im_file def _load_additional_data(self, im_rel_path): return {} def _get_sample(self, index): num_fails = 0 max_fails = 50 try: input_img, facial_landmarks, valence, arousal, image_path = self._load_image(index) additional_data = self._load_additional_data(Path(image_path).relative_to(self.image_path)) except Exception as e: # if the image is corrupted or missing (there is a few :-/), find some other one while True: num_fails += 1 if num_fails >= max_fails: # something must have gone serious wrong. Nothing loads, so throw an exception raise e index += 1 index = index % len(self) try: input_img, facial_landmarks, valence, arousal, image_path = self._load_image(index) additional_data = self._load_additional_data(Path(image_path).relative_to(self.image_path)) success = True except Exception as e2: success = False if success: break left = facial_landmarks[:,0].min() top = facial_landmarks[:,1].min() right = facial_landmarks[:,0].max() bottom = facial_landmarks[:,1].max() input_img_shape = input_img.shape if not self.use_processed: # Use AffectNet as is provided (their bounding boxes, and landmarks, no segmentation) old_size, center = bbox2point(left, right, top, bottom, type='kpt68') # old_size, center = bbox2point(left, right, top, bottom, type='bbox') size = int(old_size * self.scale) img, landmark = bbpoint_warp(input_img, center, size, self.image_size, landmarks=facial_landmarks) img *= 255. if not self.use_gt_bb: raise NotImplementedError() # landmark_type, landmark = load_landmark( # self.path_prefix / self.landmark_list[index]) landmark = landmark[np.newaxis, ...] seg_image = None else: # use AffectNet processed by me. 
I used their bounding boxes (to not have to worry about detecting # the correct face in case there's more) and I ran our FAN and segmentation over it img = input_img # the image has already been cropped in preprocessing (make sure the input root path # is specificed to the processed folder and not the original one landmark_path = Path(self.image_path).parent / "landmarks" / im_rel_path landmark_path = landmark_path.parent / (landmark_path.stem + ".pkl") landmark_type, landmark = load_landmark( landmark_path) landmark = landmark[np.newaxis, ...] segmentation_path = Path(self.image_path).parent / "segmentations" / im_rel_path segmentation_path = segmentation_path.parent / (segmentation_path.stem + ".pkl") seg_image, seg_type = load_segmentation( segmentation_path) seg_image = seg_image[np.newaxis, :, :, np.newaxis] seg_image = process_segmentation( seg_image, seg_type).astype(np.uint8) if self.load_emotion_feature: emotion_path = Path(self.image_path).parent / "emotions" / im_rel_path emotion_path = emotion_path.parent / (emotion_path.stem + ".pkl")
""" Author: Radek Danecek Copyright (c) 2022, Radek Danecek All rights reserved. # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # Using this computer program means that you agree to the terms # in the LICENSE file included with this software distribution. # Any use not explicitly granted by the LICENSE is prohibited. # # Copyright©2022 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # For comments or questions, please email us at [email protected] # For commercial licensing contact, please contact [email protected] """ warnings.filterwarnings('ignore') # def make_class_balanced_sampler(labels): # class_counts = np.bincount(labels) # class_weights = 1. / class_counts # weights = class_weights[labels] # return WeightedRandomSampler(weights, len(weights)) # # def make_va_balanced_sampler(labels): # class_counts = np.bincount(labels) # class_weights = 1. / class_counts # weights = class_weights[labels] # return WeightedRandomSampler(weights, len(weights)) # # def make_balanced_sample_by_weights(weights): # return WeightedRandomSampler(weights, len(weights)) def new_affewva(class_name): dataset_class = class_from_str(class_name, sys.modules[__name__]) return dataset_class class AfewVaDataModule(FaceDataModuleBase): def __init__(self, input_dir, output_dir, processed_subfolder = None, face_detector='fan', face_detector_threshold=0.9, image_size=224, scale=1.25, bb_center_shift_x=0., bb_center_shift_y=0., processed_ext=".png", device=None, augmentation=None, train_batch_size=64, val_batch_size=64, test_batch_size=64, num_workers=0, ring_type=None, ring_size=None, drop_last=False, sampler=None, split_seed=0, train_fraction=0.6, val_fraction=0.2, test_fraction=0.2, k_fold_crossvalidation=None, k_index=None, dataset_type=None, ): super().__init__(input_dir, output_dir, processed_subfolder, face_detector=face_detector, face_detector_threshold=face_detector_threshold, image_size=image_size, bb_center_shift_x=bb_center_shift_x, bb_center_shift_y=bb_center_shift_y, scale=scale, processed_ext=processed_ext, device=device) self.dataset_type = dataset_type or "AfewVa" # # self.subsets = sorted([f.name for f in (Path(input_dir) / "Manually_Annotated" / "Manually_Annotated_Images").glob("*") if f.is_dir()]) # self.input_dir = Path(self.root_dir) / "Manually_Annotated" / "Manually_Annotated_Images" # train = pd.read_csv(self.input_dir.parent / "training.csv") # val = pd.read_csv(self.input_dir.parent / "validation.csv") # self.df = pd.concat([train, val], ignore_index=True, sort=False) self.face_detector_type = 'fan' self.scale = scale self.use_processed = False if not (Path(self.output_dir) / "gt.pkl").exists(): video_list = sorted([p for p in Path(input_dir).glob("*") if p.is_dir()]) video_gts = OrderedDict() for iv, vp in enumerate(auto.tqdm(video_list)): video_gts[vp.stem] = Munch( json.load(open(vp / (vp.stem + ".json"), "r"))) with open(Path(self.output_dir) / "gt.pkl", "wb") as f: pkl.dump(video_gts, f) else: with open(Path(self.output_dir) / "gt.pkl", "rb") as f: video_gts = pkl.load(f) if self.use_processed: self.image_path = Path(self.output_dir) / "detections" else: self.image_path = Path(input_dir) self.seed = split_seed np.random.seed(self.seed) indices = np.arange(len(video_gts), dtype=np.int32) + 1 np.random.shuffle(indices) if k_fold_crossvalidation is not None: training_indices = [] 
validation_indices = [] for k in range(k_fold_crossvalidation): start_i = (k * len(indices)) // k_fold_crossvalidation end_i = ((k + 1) * len(indices)) // k_fold_crossvalidation training_indices += [np.concatenate([indices[0:(start_i)], indices[end_i:]])] validation_indices += [indices[start_i:end_i]] self.train_indices = training_indices[k_index] self.val_indices = validation_indices[k_index] self.test_indices = np.copy(validation_indices[k_index]) else: self.train_fraction = train_fraction self.val_fraction = val_fraction self.test_fraction = test_fraction assert self.train_fraction + self.val_fraction + self.test_fraction == 1.0 train_end = int(len(indices) * self.train_fraction) val_end = int(len(indices) * ( self.train_fraction + self.val_fraction)) self.train_indices = indices[:train_end] self.val_indices = indices[train_end:val_end] self.test_indices = indices[val_end:] # iterate over the training indices and create a list of the corresponding video names self.train_list = OrderedDict() self.val_list = OrderedDict() self.test_list = OrderedDict() for tr_i in self.train_indices: self.train_list[f"{tr_i:03d}"] = video_gts[f"{tr_i:03d}"] for v_i in self.val_indices: self.val_list[f"{v_i:03d}"] = video_gts[f"{v_i:03d}"] for t_i in self.test_indices: self.test_list[f"{t_i:03d}"] = video_gts[f"{t_i:03d}"] # self.ignore_invalid = ignore_invalid self.train_batch_size = train_batch_size self.val_batch_size = val_batch_size self.test_batch_size = test_batch_size self.num_workers = num_workers self.augmentation = augmentation self.sampler = sampler or "uniform" if self.sampler not in ["uniform", "balanced_videos", "balanced_expr", "balanced_va", "balanced_v", "balanced_a"]: raise ValueError(f"Invalid sampler type: '{self.sampler}'") if self.sampler in ["balanced_expr", "balanced_va", "balanced_v", "balanced_a"]: raise NotImplementedError() if ring_type not in [None, "gt_va", "augment"]: raise ValueError(f"Invalid ring type: '{ring_type}'") if ring_type == "gt_va": raise NotImplementedError() self.ring_type = ring_type self.ring_size = ring_size self.drop_last = drop_last @property def subset_size(self): return 1000 # @property # def num_subsets(self): # num_subsets = len(self.df) // self.subset_size # if len(self.df) % self.subset_size != 0: # num_subsets += 1 # return num_subsets def _detect_faces(self): subset_size = 1000 num_subsets = len(self.df) // subset_size if len(self.df) % subset_size != 0: num_subsets += 1 for sid in range(self.num_subsets): self._detect_landmarks_and_segment_subset(self.subset_size * sid, min((sid + 1) * self.subset_size, len(self.df))) def _extract_emotion_features(self): subset_size = 1000 num_subsets = len(self.df) // subset_size if len(self.df) % subset_size != 0: num_subsets += 1 for sid in range(self.num_subsets): self._extract_emotion_features_from_subset(self.subset_size * sid, min((sid + 1) * self.subset_size, len(self.df))) def _path_to_detections(self): return Path(self.output_dir) / "detections" def _path_to_segmentations(self): return Path(self.output_dir) / "segmentations" def _path_to_landmarks(self): return Path(self.output_dir) / "landmarks" def _path_to_emotions(self): return Path(self.output_dir) / "emotions" def _get_emotion_net(self, device): net = get_emonet() net = net.to(device) return net, "emo_net" def _extract_emotion_features_from_subset(self, start_i, end_i): self._path_to_emotions().mkdir(parents=True, exist_ok=True) print(f"Processing subset {start_i // self.subset_size}") image_file_list = [] for i in auto.tqdm(range(start_i, 
end_i)): im_file = self.df.loc[i]["subDirectory_filePath"] in_detection_fname = self._path_to_detections() / Path(im_file).parent / (Path(im_file).stem + ".png") if in_detection_fname.is_file(): image_file_list += [in_detection_fname] transforms = Compose([ Resize((256, 256)), ]) batch_size = 32 dataset = UnsupervisedImageDataset(image_file_list, image_transforms=transforms, im_read='pil') loader = DataLoader(dataset, batch_size=batch_size, num_workers=4, shuffle=False) device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print(device) net, emotion_type = self._get_emotion_net(device) for i, batch in enumerate(auto.tqdm(loader)): # facenet_pytorch expects this stanadrization for the input to the net # images = fixed_image_standardization(batch['image'].to(device)) images = batch['image'].cuda() # start = time.time() with torch.no_grad(): out = net(images, intermediate_features=True) # end = time.time() # print(f" Inference batch {i} took : {end - start}") emotion_features = {key : val.detach().cpu().numpy() for key, val in out.items()} # start = time.time() for j in range(images.size()[0]): image_path = batch['path'][j] out_emotion_folder = self._path_to_emotions() / Path(image_path).parent.name out_emotion_folder.mkdir(exist_ok=True, parents=True) emotion_path = out_emotion_folder / (Path(image_path).stem + ".pkl") emotion_feature_j = {key: val[j] for key, val in emotion_features.items()} del emotion_feature_j['emo_feat'] # too large to be stored per frame = (768, 64, 64) del emotion_feature_j['heatmap'] # not too large but probably not usefull = (68, 64, 64) # we are keeping emo_feat_2 (output of last conv layer (before FC) and then the outputs of the FCs - expression, valence and arousal) save_emotion(emotion_path, emotion_feature_j, emotion_type) def _detect_landmarks_and_segment_subset(self, start_i, end_i): self._path_to_detections().mkdir(parents=True, exist_ok=True) self._path_to_segmentations().mkdir(parents=True, exist_ok=True) self._path_to_landmarks().mkdir(parents=True, exist_ok=True) detection_fnames = [] out_segmentation_folders = [] status_array = np.memmap(self.status_array_path, dtype=np.bool, mode='r', shape=(self.num_subsets,) ) completed = status_array[start_i // self.subset_size] if not completed: print(f"Processing subset {start_i // self.subset_size}") for i in auto.tqdm(range(start_i, end_i)): im_file = self.df.loc[i]["subDirectory_filePath"] left = self.df.loc[i]["face_x"] top = self.df.loc[i]["face_y"] right = left + self.df.loc[i]["face_width"] bottom = top + self.df.loc[i]["face_height"] bb = np.array([top, left, bottom, right]) im_fullfile = Path(self.input_dir) / im_file try: detection, _, _, bbox_type, landmarks, orig_landmarks = self._detect_faces_in_image(im_fullfile, detected_faces=[bb]) except Exception as e: # except ValueError as e: print(f"Failed to load file:") print(f"{im_fullfile}") print(traceback.print_exc()) continue # except SyntaxError as e: # print(f"Failed to load file:") # print(f"{im_fullfile}") # print(traceback.print_exc()) # continue out_detection_fname = self._path_to_detections() / Path(im_file).parent / (Path(im_file).stem + self.processed_ext) # detection_fnames += [out_detection_fname.relative_to(self.output_dir)] out_detection_fname.parent.mkdir(exist_ok=True) detection_fnames += [out_detection_fname] if self.processed_ext in [".jpg", ".JPG"]: imsave(out_detection_fname, detection[0], quality=100) else: imsave(out_detection_fname, detection[0]) # out_segmentation_folders += [self._path_to_segmentations() / 
Path(im_file).parent] # save landmarks out_landmark_fname = self._path_to_landmarks() / Path(im_file).parent / (Path(im_file).stem + ".pkl") out_landmark_fname.parent.mkdir(exist_ok=True) # landmark_fnames += [out_landmark_fname.relative_to(self.output_dir)] save_landmark(out_landmark_fname, landmarks[0], bbox_type) self._segment_images(detection_fnames, self._path_to_segmentations(), path_depth=1) status_array = np.memmap(self.status_array_path, dtype=np.bool, mode='r+', shape=(self.num_subsets,) ) status_array[start_i // self.subset_size] = True status_array.flush() del status_array print(f"Processing subset {start_i // self.subset_size} finished") else: print(f"Subset {start_i // self.subset_size} is already processed") @property def status_array_path(self): return Path(self.output_dir) / "status.memmap" @property def is_processed(self): status_array = np.memmap(self.status_array_path, dtype=np.bool, mode='r', shape=(self.num_subsets,) ) all_processed = status_array.all() return all_processed def prepare_data(self): pass # if self.use_processed: # if not self.status_array_path.is_file(): # print(f"Status file does not exist. Creating '{self.status_array_path}'") # self.status_array_path.parent.mkdir(exist_ok=True, parents=True) # status_array = np.memmap(self.status_array_path, # dtype=np.bool, # mode='w+', # shape=(self.num_subsets,) # ) # status_array[...] = False # del status_array # # all_processed = self.is_processed # if not all_processed: # self._detect_faces() # # # if self.ring_type == "emonet_feature": # self._prepare_emotion_retrieval() def _new_training_set(self, for_training=True): if for_training: im_transforms_train = create_image_augmenter(self.image_size, self.augmentation) if self.ring_type == "emonet_feature": prefix = self.mode + "_train_" if self.ignore_invalid: prefix += "valid_only_" feature_label = 'emo_net_emo_feat_2' self._load_retrieval_arrays(prefix, feature_label) nn_indices = self.nn_indices_array nn_distances = self.nn_distances_array else: nn_indices = None nn_distances = None return new_affewva(self.dataset_type)(self.image_path, self.train_list, self.image_size, self.scale, im_transforms_train, ring_type=self.ring_type, ring_size=self.ring_size, load_emotion_feature=False, nn_indices_array=nn_indices, nn_distances_array= nn_distances, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) return new_affewva(self.dataset_type)(self.image_path, self.train_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, load_emotion_feature=True, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) def setup(self, stage=None): self.training_set = self._new_training_set() self.validation_set = new_affewva(self.dataset_type)(self.image_path, self.val_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) self.test_dataframe_path = Path(self.output_dir) / "validation_representative_selection.csv" self.test_set = new_affewva(self.dataset_type)(self.image_path, self.test_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) # if self.mode in ['all', 'manual']: # # self.image_list += sorted(list((Path(self.path) / "Manually_Annotated").rglob(".jpg"))) # self.dataframe = pd.load_csv(self.path 
/ "Manually_Annotated" / "Manually_Annotated.csv") # if self.mode in ['all', 'automatic']: # # self.image_list += sorted(list((Path(self.path) / "Automatically_Annotated").rglob("*.jpg"))) # self.dataframe = pd.load_csv( # self.path / "Automatically_Annotated" / "Automatically_annotated_file_list.csv") def train_dataloader(self): if self.sampler == "uniform": sampler = None else: raise NotImplementedError() # elif self.sampler == "balanced_expr": # sampler = make_class_balanced_sampler(self.training_set.df["expression"].to_numpy()) # elif self.sampler == "balanced_va": # sampler = make_balanced_sample_by_weights(self.training_set.va_sample_weights) # elif self.sampler == "balanced_v": # sampler = make_balanced_sample_by_weights(self.training_set.v_sample_weights) # elif self.sampler == "balanced_a": # sampler = make_balanced_sample_by_weights(self.training_set.a_sample_weights) # else: # raise ValueError(f"Invalid sampler value: '{self.sampler}'") dl = DataLoader(self.training_set, shuffle=sampler is None, num_workers=self.num_workers, pin_memory=True, batch_size=self.train_batch_size, drop_last=self.drop_last, sampler=sampler) return dl def val_dataloader(self): return DataLoader(self.validation_set, shuffle=True, num_workers=self.num_workers, pin_memory=True, batch_size=self.val_batch_size, drop_last=False) def test_dataloader(self): return [ self.val_dataloader(), DataLoader(self.test_set, shuffle=True, num_workers=self.num_workers, pin_memory=True, batch_size=self.test_batch_size, drop_last=False) ] def _get_retrieval_array(self, prefix, feature_label, dataset_size, feature_shape, feature_dtype, modifier='w+'): outfile_name = self._path_to_emotion_nn_retrieval_file(prefix, feature_label) if outfile_name.is_file() and modifier != 'r': raise RuntimeError(f"The retrieval array already exists! 
'{outfile_name}'") shape = tuple([dataset_size] + list(feature_shape)) outfile_name.parent.mkdir(exist_ok=True, parents=True) array = np.memmap(outfile_name, dtype=feature_dtype, mode=modifier, shape=shape ) return array def _path_to_emotion_nn_indices_file(self, prefix, feature_label): nn_indices_file = Path(self.output_dir) / "cache" / (prefix + feature_label + "_nn_indices.memmap") return nn_indices_file def _path_to_emotion_nn_distances_file(self, prefix, feature_label): nn_distances_file = Path(self.output_dir) / "cache" / (prefix + feature_label + "_nn_distances.memmap") return nn_distances_file def _path_to_emotion_nn_retrieval_file(self, prefix, feature_label): outfile_name = Path(self.output_dir) / "cache" / (prefix + feature_label + ".memmap") return outfile_name def _load_retrieval_arrays(self, prefix, feature_label): # prefix = self.mode + "_train_" # if self.ignore_invalid: # prefix += "valid_only_" # feature_label = 'emo_net_emo_feat_2' nn_indices_file = self._path_to_emotion_nn_indices_file(prefix, feature_label) nn_distances_file = self._path_to_emotion_nn_distances_file(prefix, feature_label) try: with open(nn_indices_file.parent / (nn_indices_file.stem + "_meta.pkl"), "rb") as f: indices_array_dtype = pkl.load(f) indices_array_shape = pkl.load(f) except: indices_array_dtype = np.int64, indices_array_shape = (len(dataset), NUM_NEIGHBORS) try: with open(nn_distances_file.parent / (nn_distances_file.stem + "_meta.pkl"), "rb") as f: distances_array_dtype = pkl.load(f) distances_array_shape = pkl.load(f) except: distances_array_dtype = np.float32, distances_array_shape = (len(dataset), NUM_NEIGHBORS) self.nn_indices_array = np.memmap(nn_indices_file, # dtype=np.int32, dtype=indices_array_dtype, mode="r", shape=indices_array_shape ) self.nn_distances_array = np.memmap(nn_distances_file, dtype=distances_array_dtype, # dtype=np.float64, mode="r", shape=distances_array_shape ) def _prepare_emotion_retrieval(self): prefix = self.mode + "_train_" if self.ignore_invalid: prefix += "valid_only_" feature_label = 'emo_net_emo_feat_2' nn_indices_file = self._path_to_emotion_nn_indices_file(prefix, feature_label) nn_distances_file = self._path_to_emotion_nn_distances_file(prefix, feature_label) NUM_NEIGHBORS = 100 if nn_indices_file.is_file() and nn_distances_file.is_file(): print("Precomputed nn arrays found.") return dataset = self._new_training_set(for_training=False) dl = DataLoader(dataset, shuffle=False, num_workers=self.num_workers, batch_size=self.train_batch_size) array = None if self.ring_type != "emonet_feature": raise ValueError(f"Invalid ring type for emotion retrieval {self.ring_type}") outfile_name = self._path_to_emotion_nn_retrieval_file(prefix, feature_label) if not outfile_name.is_file(): for bi, batch in enumerate(auto.tqdm(dl)): feat = batch[feature_label].numpy() feat_size = feat.shape[1:] if array is None: array = self._get_retrieval_array(prefix, feature_label, len(dataset), feat_size, feat.dtype) # for i in range(feat.shape[0]): # idx = bi*self.train_batch_size + i array[bi*self.train_batch_size:bi*self.train_batch_size + feat.shape[0], ...] 
= feat del array else: print(f"Feature array found in '{outfile_name}'") for bi, batch in enumerate(dl): feat = batch[feature_label].numpy() feat_size = feat.shape[1:] break array = self._get_retrieval_array(prefix, feature_label, len(dataset), feat_size, feat.dtype, modifier='r') nbrs = NearestNeighbors(n_neighbors=30, algorithm='auto', n_jobs=-1).fit(array) distances, indices = nbrs.kneighbors(array, NUM_NEIGHBORS) indices_array = np.memmap(nn_indices_file, dtype=indices.dtype, mode="w+", shape=indices.shape ) indices_array[...] = indices del indices_array distances_array = np.memmap(nn_distances_file, dtype=distances.dtype, mode="w+", shape=distances.shape ) distances_array[...] = distances del distances_array # save sizes a dtypes with open(nn_indices_file.parent / (nn_indices_file.stem + "_meta.pkl"), "wb") as f: pkl.dump(indices.dtype, f) pkl.dump(indices.shape, f) with open(nn_distances_file.parent / (nn_distances_file.stem + "_meta.pkl"), "wb") as f: pkl.dump(distances.dtype, f) pkl.dump(distances.shape, f) self.nn_indices_array = np.memmap(nn_indices_file, dtype=indices.dtype, mode="r", shape=indices.shape ) self.nn_distances_array = np.memmap(nn_distances_file, dtype=distances.dtype, mode="r", shape=distances.shape ) class AfewVaDataVisTestModule(AfewVaDataModule): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def setup(self, stage=None): self.training_set = None self.validation_set = TestSubsetAfewVa(self.image_path, self.val_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) self.test_dataframe_path = Path(self.output_dir) / "validation_representative_selection.csv" self.test_set = TestSubsetAfewVa(self.image_path, self.test_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) def val_dataloader(self): return DataLoader(self.validation_set, shuffle=False, num_workers=self.num_workers, pin_memory=True, batch_size=self.val_batch_size, drop_last=False) def test_dataloader(self): return [ self.val_dataloader(), DataLoader(self.test_set, shuffle=False, num_workers=self.num_workers, pin_memory=True, batch_size=self.test_batch_size, drop_last=False) ] class AfewVa(EmotionalImageDatasetBase): def __init__(self, image_path, sample_list, image_size, scale = 1.4, transforms : imgaug.augmenters.Augmenter = None, use_gt_bb=True, bb_center_shift_x=0.0, bb_center_shift_y=0.0, ring_type=None, ring_size=None, load_emotion_feature=False, nn_indices_array=None, nn_distances_array=None, ext=".png", use_processed = None, normalize_va = None, ): self.sample_list = sample_list self.image_path = image_path self.image_size = image_size self.use_gt_bb = use_gt_bb # self.transforms = transforms or imgaug.augmenters.Identity() self.transforms = transforms or imgaug.augmenters.Resize((image_size, image_size)) self.scale = scale self.landmark_normalizer = KeypointNormalization() self.use_processed = use_processed or False self.normalize_va = normalize_va or True # normalize va to <-1,1> # self.ignore_invalid = ignore_invalid self.load_emotion_feature = load_emotion_feature self.nn_distances_array = nn_distances_array self.ext=ext # # if ignore_invalid: # # filter invalid classes # ignored_classes = [AffectNetExpressions.Uncertain.value, AffectNetExpressions.Occluded.value] # self.df = 
self.df[self.df["expression"].isin(ignored_classes) == False] # # self.df = self.df.drop(self.df[self.df["expression"].isin(ignored_classes)].index) # # # filter invalid va values # self.df = self.df[self.df.valence != -2.] # # self.df = self.df.drop(self.df.valence == -2.) # self.df = self.df[self.df.arousal != -2.] # # self.df = self.df.drop(self.df.arousal == -2.) # # valid_indices = np.logical_not(pd.isnull(self.df)) # # valid_indices = self.df.index # self.df = self.df.reset_index(drop=True) # # if nn_indices_array is not None and nn_indices_array.shape[0] != len(self.df): # # nn_indices_array = nn_indices_array[valid_indices, ...] # # if nn_distances_array is not None and nn_distances_array.shape[0] != len(self.df): # # nn_distances_array = nn_distances_array[valid_indices, ...] # # self.exp_weights = self.df["expression"].value_counts(normalize=True).to_dict() # self.exp_weight_tensor = torch.tensor([self.exp_weights[i] for i in range(len(self.exp_weights))], dtype=torch.float32) # self.exp_weight_tensor = 1. / self.exp_weight_tensor # self.exp_weight_tensor /= torch.norm(self.exp_weight_tensor) self.size = 0 self.video_sizes_cumulative = [0] for vid_name, gt in self.sample_list.items(): self.size += len(gt.frames) self.video_sizes_cumulative += [self.size] if ring_type not in [None, "gt_expression", "gt_va", "emonet_feature", "emonet_va", "emonet_expression", "augment"]: raise ValueError(f"Invalid ring type '{ring_type}'") if ring_type == "emonet_expression" and ( nn_indices_array is None or nn_distances_array is None ): raise ValueError(f"If ring type set to '{ring_type}', nn files must be specified") self.ring_type = ring_type self.ring_size = ring_size # self._init_sample_weights() def _init_sample_weights(self): raise NotImplementedError() if self.ring_type == "gt_expression": grouped = self.df.groupby(['expression']) self.expr2sample = grouped.groups elif self.ring_type == "emonet_expression": raise NotImplementedError() else: self.expr2sample = None va = self.df[["valence", "arousal"]].to_numpy() sampling_rate = 0.1 # bin_1d = np.arange(-1.,1.+sampling_rate, sampling_rate) bin_1d = np.arange(-1.,1., sampling_rate) stat, x_ed, y_ed, va_binnumber = sp.stats.binned_statistic_2d( va[:, 0], va[:, 1], None, 'count', [bin_1d, bin_1d], expand_binnumbers=False) va_weights = 1 / va_binnumber va_weights /= np.linalg.norm(va_weights) va_weights *= np.linalg.norm(np.ones_like(va_weights)) self.va_sample_weights = va_weights if self.ring_type == "gt_va": raise NotImplementedError() self.bins_to_samples = {} self.va_bin_indices = va_binnumber bin_indices = np.unique(va_binnumber) for bi in bin_indices: self.bins_to_samples[bi] = np.where(va_binnumber == bi)[0] elif self.ring_type == "emonet_va": raise NotImplementedError() else: self.bins_to_samples = {} if self.ring_type == "emonet_feature": raise NotImplementedError() if len(self) != self.nn_distances_array.shape[0] or len(self) != self.nn_indices_array.shape[0]: raise RuntimeError("The lengths of the dataset does not correspond to size of the nn_array. " "The sizes should be equal. 
Sth fishy is happening") # self.nn_indices_array = self.nn_indices_array self.nn_distances_array = nn_distances_array else: self.nn_indices_array = None self.nn_distances_array = None # v = self.df[["valence"]].to_numpy() sampling_rate = 0.1 bin_1d = np.arange(-1.,1., sampling_rate) stat, x_ed, va_binnumber = sp.stats.binned_statistic( va[:, 0], None, 'count', bin_1d) v_weights = 1 / va_binnumber v_weights /= np.linalg.norm(v_weights) v_weights *= np.linalg.norm(np.ones_like(v_weights)) self.v_sample_weights = v_weights bin_1d = np.arange(-1.,1., sampling_rate) stat, x_ed, va_binnumber = sp.stats.binned_statistic( va[:, 1], None, 'count', bin_1d) a_weights = 1 / va_binnumber a_weights /= np.linalg.norm(a_weights) a_weights *= np.linalg.norm(np.ones_like(a_weights)) self.a_sample_weights = a_weights def __len__(self): # return 100 return self.size def _load_image(self, index): vid_index = bisect.bisect_right(self.video_sizes_cumulative, index) - 1 vid_first_im_index = self.video_sizes_cumulative[vid_index] vid_item = list(self.sample_list.items())[vid_index] vid_name = vid_item[0] vid_gt = vid_item[1] assert vid_gt.video_id == vid_name vid_frame_list = sorted(list(vid_gt['frames'].keys())) selected_frame = vid_frame_list[index - vid_first_im_index] im_rel_path = Path(vid_name) / (selected_frame + self.ext) im_file = Path(self.image_path) / im_rel_path im_file = im_file.parent / (im_file.stem + self.ext) input_img = imread(im_file) # # scale_factor_x = 1.48 # scale_factor_x = 1.25 # # scale_factor_x = 1 # input_img = resize(input_img, (432, 720, 1)) input_img = resize(input_img, (576, 960)) scale_factor_x = 720 / 960 valence = vid_gt['frames'][selected_frame]['valence'] arousal = vid_gt['frames'][selected_frame]['arousal'] facial_landmarks = np.array(vid_gt['frames'][selected_frame]['landmarks']) facial_landmarks[:,0] /= scale_factor_x if self.normalize_va: valence /= 10. arousal /= 10. return input_img, facial_landmarks, valence, arousal, im_file def _load_additional_data(self, im_rel_path): return {} def _get_sample(self, index): num_fails = 0 max_fails = 50 try: input_img, facial_landmarks, valence, arousal, image_path = self._load_image(index) additional_data = self._load_additional_data(Path(image_path).relative_to(self.image_path)) except Exception as e: # if the image is corrupted or missing (there is a few :-/), find some other one while True: num_fails += 1 if num_fails >= max_fails: # something must have gone serious wrong. Nothing loads, so throw an exception raise e index += 1 index = index % len(self) try: input_img, facial_landmarks, valence, arousal, image_path = self._load_image(index) additional_data = self._load_additional_data(Path(image_path).relative_to(self.image_path)) success = True except Exception as e2: success = False if success: break left = facial_landmarks[:,0].min() top = facial_landmarks[:,1].min() right = facial_landmarks[:,0].max() bottom = facial_landmarks[:,1].max() input_img_shape = input_img.shape if not self.use_processed: # Use AffectNet as is provided (their bounding boxes, and landmarks, no segmentation) old_size, center = bbox2point(left, right, top, bottom, type='kpt68') # old_size, center = bbox2point(left, right, top, bottom, type='bbox') size = int(old_size * self.scale) img, landmark = bbpoint_warp(input_img, center, size, self.image_size, landmarks=facial_landmarks) img *= 255. 
if not self.use_gt_bb: raise NotImplementedError() # landmark_type, landmark = load_landmark( # self.path_prefix / self.landmark_list[index]) landmark = landmark[np.newaxis, ...] seg_image = None else: # use AffectNet processed by me. I used their bounding boxes (to not have to worry about detecting # the correct face in case there's more) and I ran our FAN and segmentation over it img = input_img # the image has already been cropped in preprocessing (make sure the input root path # is specificed to the processed folder and not the original one landmark_path = Path(self.image_path).parent / "landmarks" / im_rel_path landmark_path = landmark_path.parent / (landmark_path.stem + ".pkl") landmark_type, landmark = load_landmark( landmark_path) landmark = landmark[np.newaxis, ...] segmentation_path = Path(self.image_path).parent / "segmentations" / im_rel_path segmentation_path = segmentation_path.parent / (segmentation_path.stem + ".pkl") seg_image, seg_type = load_segmentation( segmentation_path) seg_image = seg_image[np.newaxis, :, :, np.newaxis] seg_image = process_segmentation( seg_image, seg_type).astype(np.uint8) if self.load_emotion_feature: emotion_path = Path(self.image_path).parent / "emotions" / im_rel_path emotion_path = emotion_path.parent / (emotion_path.stem + ".pkl")
emotion_features, emotion_type = load_emotion(emotion_path)
2
2023-11-07 20:13:32+00:00
16k
hxz393/ConfigCenterComparer
ui/action_start.py
[ { "identifier": "COL_INFO", "path": "config/settings.py", "snippet": "COL_INFO = {\n \"name\": {\"col\": 0},\n \"group\": {\"col\": 1},\n \"key\": {\"col\": 2},\n \"pro_value\": {\"col\": 3},\n \"pro_time\": {\"col\": 4},\n \"pre_value\": {\"col\": 5},\n \"pre_time\": {\"col\": 6},\n \"test_value\": {\"col\": 7},\n \"test_time\": {\"col\": 8},\n \"dev_value\": {\"col\": 9},\n \"dev_time\": {\"col\": 10},\n \"consistency\": {\"col\": 11},\n \"skip\": {\"col\": 12},\n\n}" }, { "identifier": "get_resource_path", "path": "lib/get_resource_path.py", "snippet": "def get_resource_path(relative_path: Union[str, os.PathLike]) -> Optional[str]:\n \"\"\"\n 获取资源的绝对路径。这个函数适用于 PyInstaller 打包后的可执行文件。\n\n :type relative_path: Union[str, os.PathLike]\n :param relative_path: 相对路径,可以是字符串或 os.PathLike 对象。\n :rtype: Optional[str]\n :return: 资源的绝对路径,如果发生错误则返回 None。\n \"\"\"\n\n try:\n base_path = getattr(sys, '_MEIPASS', os.path.abspath(\".\"))\n return os.path.join(base_path, os.path.normpath(relative_path))\n except Exception:\n logger.exception(\"An error occurred while retrieving resource path\")\n return None" }, { "identifier": "execute_queries", "path": "module/execute_queries.py", "snippet": "def execute_queries(config_connection: Dict[str, Dict[str, Union[Dict[str, str], bool]]],\n config_main: Dict[str, str]) -> Tuple[Dict[str, Dict[str, str]], Dict[str, bool]]:\n \"\"\"\n 执行数据库查询并返回格式化后的结果和查询状态。\n\n 此函数接收数据库连接配置和主要配置参数。它首先根据主配置生成SQL查询语句,然后对每个数据库环境执行查询。查询结果将被格式化,并更新查询状态。\n\n :param config_connection: 数据库连接配置,包含环境名称和对应的数据库配置。\n :type config_connection: Dict[str, Dict[str, Union[Dict[str, str], bool]]]\n :param config_main: 主要配置参数,用于生成查询SQL语句。\n :type config_main: Dict[str, str]\n :return: 包含格式化查询结果的字典和每个环境的查询状态。\n :rtype: Tuple[Dict[str, Dict[str, str]], Dict[str, bool]]\n\n :example:\n >>> os.chdir(os.path.dirname(os.getcwd()))\n >>> connection = {\"dev\": {'mysql_on': True, 'ssh_on': False, 'mysql': {'host': '192.168.2.204', \"port\": \"3306\", \"user\": \"root\", \"password\": \"QeqAr:%R+s5:hYnr\", \"db\": \"ApolloConfigDB_dev\"}}}\n >>> main = {'config_center': 'Apollo', 'apollo_name': 'AppId', 'fix_name_before': '', 'fix_name_after': '', 'fix_name_left': '', 'fix_name_right': '',}\n >>> results, statuses = execute_queries(connection, main)\n >>> assert type(results) == dict\n >>> assert type(statuses) == dict\n >>> statuses\n >>> results\n \"\"\"\n query_statuses = {env_name: False for env_name in config_connection.keys()}\n formatted_results = {}\n\n try:\n query_sql = get_query_sql(config_main)\n\n for env_name, db_config in config_connection.items():\n # 获取指定环境的查询结果\n query_results = get_query_result(db_config, query_sql)\n logger.debug(f\"ENV: {env_name}, SQL query finished.\")\n if query_results:\n query_statuses[env_name] = True\n # 格式化查询结果\n format_query_results(query_results, env_name, config_main, formatted_results)\n else:\n logger.warning(f\"No results obtained from database query for environment: {env_name}\")\n\n # 通过对比过滤列表,得到是否过滤信息,更新到结果字典\n update_skip_status(formatted_results)\n # 查询各配置环境的值,得到一致性信息,更新到结果字典。只对比查询成功的环境\n update_consistency_status(formatted_results, query_statuses)\n logger.debug(\"Status update finished.\")\n\n return formatted_results, query_statuses\n except Exception:\n logger.exception(\"Exception occurred during executing queries\")\n return {}, query_statuses" }, { "identifier": "ConfigManager", "path": "ui/config_manager.py", "snippet": "class ConfigManager(QObject):\n \"\"\"\n 配置管理器类,负责管理和更新应用程序的配置信息。\n\n 该类包括获取和设置主配置、连接配置和跳过列表的方法,同时提供信号以通知配置更新。\n\n :ivar 
config_main_updated: 当主配置更新时发出的信号。\n :ivar config_connection_updated: 当连接配置更新时发出的信号。\n :ivar skip_list_updated: 当跳过列表更新时发出的信号。\n \"\"\"\n config_main_updated = pyqtSignal()\n config_connection_updated = pyqtSignal()\n skip_list_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._config_main, self._config_apollo, self._config_nacos = read_config_all()\n self._skip_list = read_file_to_list(CONFIG_SKIP_PATH) or []\n\n def get_config_main(self) -> Optional[Dict[str, str]]:\n \"\"\"\n 获取主配置的副本。\n\n :return: 包含主配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return copy.deepcopy(self._config_main)\n except Exception:\n logger.exception(\"Failed to get config_main.\")\n return None\n\n def get_config_connection(self) -> Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]:\n \"\"\"\n 根据当前配置中心获取连接配置的副本。\n\n :return: 包含连接配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 'Apollo':\n return copy.deepcopy(self._config_apollo)\n else:\n return copy.deepcopy(self._config_nacos)\n except Exception:\n logger.exception(\"Failed to get config_connection.\")\n return None\n\n def get_skip_list(self) -> Optional[List[str]]:\n \"\"\"\n 获取忽略列表的副本。\n\n :return: 包含跳过项的列表,如果出现错误则返回 None。\n :rtype: Optional[List[str]]\n \"\"\"\n try:\n return copy.deepcopy(self._skip_list)\n except Exception:\n logger.exception(\"Failed to get skip_list.\")\n return None\n\n def update_config_main(self, new_config: Dict[str, str]) -> None:\n \"\"\"\n 更新主配置。\n\n :param new_config: 新的主配置。\n :type new_config: Dict[str, str]\n \"\"\"\n try:\n self._config_main = new_config\n self.config_main_updated.emit()\n write_dict_to_json(CONFIG_MAIN_PATH, new_config)\n logger.info(\"Config updated: config_main\")\n except Exception:\n logger.exception(\"Failed to update config: config_main\")\n\n def update_config_connection(self, new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]) -> None:\n \"\"\"\n 更新连接配置。\n\n :param new_config: 新的连接配置。\n :type new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 'Apollo':\n self._config_apollo = new_config\n write_dict_to_json(CONFIG_APOLLO_PATH, new_config)\n else:\n self._config_nacos = new_config\n write_dict_to_json(CONFIG_NACOS_PATH, new_config)\n self.config_connection_updated.emit()\n logger.info(\"Config updated: config_connection\")\n except Exception:\n logger.exception(\"Failed to update config: config_connection\")\n\n def update_skip_list(self, new_config: List[str]) -> None:\n \"\"\"\n 更新忽略列表。\n\n :param new_config: 新忽略列表。\n :type new_config: List[str]\n \"\"\"\n try:\n self._skip_list = new_config\n # 写入到配置文件\n self.skip_list_updated.emit()\n write_list_to_file(CONFIG_SKIP_PATH, new_config)\n logger.info(\"Config updated: skip_list\")\n except Exception:\n logger.exception(\"Failed to update config: skip_list\")" }, { "identifier": "FilterBar", "path": "ui/filter_bar.py", "snippet": "class FilterBar(QWidget):\n \"\"\"\n 过滤栏类,用于在用户界面中提供过滤和搜索功能。\n\n 此类创建了一个包含服务过滤、表格状态过滤和搜索框的组件,使用户能够根据不同条件过滤表格数据。\n\n :param lang_manager: 语言管理器,用于处理界面语言的更新。\n :type lang_manager: LangManager\n :param config_manager: 配置管理器,用于获取和更新配置信息。\n :type config_manager: ConfigManager\n :param table: 要应用过滤的表格。\n :type table: TableMain\n \"\"\"\n status_updated = pyqtSignal(str)\n\n def __init__(self,\n lang_manager: LangManager,\n config_manager: ConfigManager,\n table: TableMain):\n 
super().__init__()\n # 实例化组件。\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.lang = self.lang_manager.get_lang()\n self.config_manager = config_manager\n self.table = table\n self.highlight_rows = []\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面。\n\n 创建并布局过滤栏中的所有组件,包括服务过滤、表格状态过滤和搜索值输入框。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 修改字体大小\n self.setStyleSheet(\"font-size: 14px;\")\n # 建立横向主布局\n self.layout = QHBoxLayout(self)\n # 创建过滤服务组件\n self._create_filter_app()\n # 创建过滤列表组件\n self._create_filter_table()\n # 创建搜索过滤值组件\n self._create_filter_value()\n # 设置布局的内容边距\n self.layout.setContentsMargins(0, 0, 0, 0)\n self.setLayout(self.layout)\n self.update_lang()\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 重新获取语言字典\n self.lang = self.lang_manager.get_lang()\n # 遍历filter_table下拉框中的所有项\n for index in range(self.filter_table_box.count()):\n # 检查数据值是否匹配\n if self.filter_table_box.itemData(index) == \"all\":\n # 更新显示值\n self.filter_table_box.setItemText(index, self.lang['ui.filter_bar_3'])\n elif self.filter_table_box.itemData(index) == \"fully\":\n self.filter_table_box.setItemText(index, self.lang['ui.filter_bar_4'])\n elif self.filter_table_box.itemData(index) == \"partially\":\n self.filter_table_box.setItemText(index, self.lang['ui.filter_bar_5'])\n elif self.filter_table_box.itemData(index) == \"skip\":\n self.filter_table_box.setItemText(index, self.lang['ui.filter_bar_6'])\n elif self.filter_table_box.itemData(index) == \"fully+skip\":\n self.filter_table_box.setItemText(index, f\"{self.lang['ui.filter_bar_4']}+{self.lang['ui.filter_bar_6']}\")\n elif self.filter_table_box.itemData(index) == \"fully+partially\":\n self.filter_table_box.setItemText(index, f\"{self.lang['ui.filter_bar_4']}+{self.lang['ui.filter_bar_5']}\")\n elif self.filter_table_box.itemData(index) == \"fully+partially+skip\":\n self.filter_table_box.setItemText(index, f\"{self.lang['ui.filter_bar_4']}+{self.lang['ui.filter_bar_5']}+{self.lang['ui.filter_bar_6']}\")\n # 直接更新服务名过滤默认选项文字\n self.filter_app_box.setItemText(0, self.lang['ui.filter_bar_3'])\n # 更新其他文字\n self.filter_app_label.setText(self.lang['ui.filter_bar_1'])\n self.filter_table_label.setText(self.lang['ui.filter_bar_2'])\n self.filter_table_check_box.setText(self.lang['ui.filter_bar_7'])\n self.filter_value_label.setText(self.lang['ui.filter_bar_8'])\n self.filter_value_button.setText(self.lang['ui.filter_bar_9'])\n self.filter_reset_button.setText(self.lang['ui.filter_bar_10'])\n\n def _create_filter_app(self) -> None:\n \"\"\"\n 创建服务过滤组件。\n\n 此方法初始化服务过滤下拉框,并设置其事件处理函数。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 建立标签,加入主布局\n self.filter_app_label = QLabel()\n self.layout.addWidget(self.filter_app_label)\n # 过滤服务下拉框\n self.filter_app_box = QComboBox()\n # 设置下拉框的尺寸策略和宽度\n self.filter_app_box.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n # 设置最大宽度,以免拉伸太长\n self.filter_app_box.setMinimumWidth(100)\n self.filter_app_box.setMaximumWidth(300)\n # 设置下拉框的事件处理\n self.filter_app_box.currentIndexChanged.connect(self.filter_table)\n # 设置下拉框的选项,通过函数填充\n self.filter_options_add()\n self.layout.addWidget(self.filter_app_box)\n # 创建一个 QFrame 作为分割线\n self._create_separator()\n\n def _create_filter_table(self) -> None:\n \"\"\"\n 创建表格状态过滤组件。\n\n 此方法初始化表格状态过滤下拉框和反向选择复选框,并设置它们的事件处理函数。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 建立标签,加入主布局\n self.filter_table_label = QLabel()\n self.layout.addWidget(self.filter_table_label)\n # 
过滤列表下拉框\n self.filter_table_box = QComboBox()\n # 设置最小宽度,以免文字放不下\n self.filter_table_box.setMinimumWidth(270)\n # 设置下拉框的选项\n self.filter_table_box.addItem(\"\", \"all\")\n self.filter_table_box.addItem(\"\", \"fully\")\n self.filter_table_box.addItem(\"\", \"partially\")\n self.filter_table_box.addItem(\"\", \"skip\")\n self.filter_table_box.addItem(\"\", \"fully+skip\")\n self.filter_table_box.addItem(\"\", \"fully+partially\")\n self.filter_table_box.addItem(\"\", \"fully+partially+skip\")\n # 设置下拉框的事件处理\n self.filter_table_box.currentIndexChanged.connect(self.filter_table)\n self.layout.addWidget(self.filter_table_box)\n # 反向选择\n self.filter_table_check_box = QCheckBox()\n self.filter_table_check_box.stateChanged.connect(self.filter_table)\n self.layout.addWidget(self.filter_table_check_box)\n # 创建一个 QFrame 作为分割线\n self._create_separator()\n\n def _create_filter_value(self) -> None:\n \"\"\"\n 创建搜索过滤组件。\n\n 此方法初始化搜索框和相关按钮,并设置事件处理函数,以便用户可以根据特定文本过滤表格数据。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 建立标签,加入主布局\n self.filter_value_label = QLabel()\n self.layout.addWidget(self.filter_value_label)\n # 搜索输入框\n self.filter_value_box = QLineEdit()\n self.filter_value_box.returnPressed.connect(self.filter_table)\n # 设置搜索输入框的尺寸策略和最小宽度\n self.filter_value_box.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n self.filter_value_box.setMinimumWidth(100)\n self.layout.addWidget(self.filter_value_box)\n # 搜索按钮\n self.filter_value_button = QPushButton()\n self.filter_value_button.clicked.connect(self.filter_table)\n self.layout.addWidget(self.filter_value_button)\n # 重置按钮\n self.filter_reset_button = QPushButton()\n self.filter_reset_button.clicked.connect(self.filter_reset)\n self.layout.addWidget(self.filter_reset_button)\n\n def _create_separator(self) -> None:\n \"\"\"\n 创建界面中的分隔线。\n\n 此方法用于在过滤器工具栏中添加分隔线。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 建好之后,直接加入主布局\n separator = QFrame()\n separator.setFrameShape(QFrame.VLine)\n separator.setFrameShadow(QFrame.Raised)\n self.layout.addWidget(separator)\n\n def filter_options_add(self) -> None:\n \"\"\"\n 填充服务过滤下拉框选项。\n\n 此方法从配置数据中提取所有唯一的服务名称,并将它们添加到服务过滤下拉框中。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n # 先断开信号\n self.filter_app_box.currentIndexChanged.disconnect(self.filter_table)\n self.filter_app_box.setEnabled(False)\n # 清空过滤器并添加显示所有行的选项\n self.filter_app_box.clear()\n self.filter_app_box.addItem(self.lang['ui.filter_bar_3'], \"all\")\n self.filter_app_box.setCurrentIndex(0)\n # 使用集合和列表推导式去重并获取所有唯一项\n unique_items = {self.table.item(row, COL_INFO['name']['col']).text()\n for row in range(self.table.rowCount())\n if self.table.item(row, COL_INFO['name']['col'])}\n # 添加唯一项到下拉框\n [self.filter_app_box.addItem(item, item) for item in unique_items]\n # 对下拉框进行排序\n model = self.filter_app_box.model()\n model.sort(0)\n except Exception:\n logger.exception(\"Exception occurred in adding filter options\")\n self.status_updated.emit(self.lang['label_status_error'])\n finally:\n # 重新连接信号\n self.filter_app_box.currentIndexChanged.connect(self.filter_table)\n self.filter_app_box.setEnabled(True)\n\n def filter_reset(self) -> None:\n \"\"\"\n 重置过滤条件。\n\n 此方法将所有过滤组件重置为默认状态,以便用户可以重新开始过滤操作。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n # 禁更新\n self.table.setUpdatesEnabled(False)\n # 断开信号连接\n self.filter_app_box.currentIndexChanged.disconnect(self.filter_table)\n self.filter_table_box.currentIndexChanged.disconnect(self.filter_table)\n self.filter_table_check_box.stateChanged.disconnect(self.filter_table)\n # 重置 QComboBox 为第一个项,通常是 
\"--显示所有--\",\n self.filter_app_box.setCurrentIndex(0)\n self.filter_table_box.setCurrentIndex(0)\n # 还原反选框状态\n self.filter_table_check_box.setChecked(False)\n # 清空搜索框 QLineEdit\n self.filter_value_box.clear()\n # 手动调用过略器\n self.filter_table()\n except Exception:\n logger.exception(\"Error occurred while resetting filters\")\n self.status_updated.emit(self.lang['label_status_error'])\n finally:\n # 开启更新,重新连接信号\n self.table.setUpdatesEnabled(True)\n self.filter_app_box.currentIndexChanged.connect(self.filter_table)\n self.filter_table_box.currentIndexChanged.connect(self.filter_table)\n self.filter_table_check_box.stateChanged.connect(self.filter_table)\n\n @log_time\n def filter_table(self, rows: Optional[List[int]] = None) -> None:\n \"\"\"\n 应用过滤条件到表格。带有时间记录用于调试。\n\n 此方法根据用户设置的过滤条件(服务名称、表格状态、搜索文本)来决定哪些行在表格中可见。\n\n :param rows: 要应用过滤器的行号列表。如果为空则应用到整表。\n :type rows: Optional[List[int]]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n self.table.setUpdatesEnabled(False)\n # 获取颜色开关\n color_switch = self.config_manager.get_config_main().get('color_set', 'ON')\n # 检查是否有rows参数\n valid_rows = rows if isinstance(rows, list) else None\n # 计算可见的行数\n visible_rows = 0\n # 搜索框输入内容\n search_value = self.filter_value_box.text().strip().lower()\n # 在新的搜索开始之前,恢复每个单元格的原始样式。\n if color_switch == 'ON':\n # 针对忽略操作,改变表格颜色。\n if valid_rows:\n self.table.apply_color_to_table(valid_rows)\n # 针对高亮操作\n elif self.highlight_rows:\n self._reset_styles()\n\n # 如果没有传入行列表,则应用到整个列表\n for row in valid_rows if valid_rows else range(self.table.rowCount()):\n consistency_data = self.table.item(row, COL_INFO['consistency']['col']).data(Qt.UserRole)\n skip_data = self.table.item(row, COL_INFO['skip']['col']).data(Qt.UserRole)\n name_data = self.table.item(row, COL_INFO['name']['col']).text()\n\n # 先匹配快速过滤,匹配过滤条件时为True,隐藏匹配的行\n table_match = self._get_table_match(consistency_data, skip_data)\n if table_match:\n self.table.setRowHidden(row, True)\n continue\n\n # 匹配选择所有或者选择服务名时为True,不设隐藏\n app_match = self._get_app_match(name_data)\n if not app_match:\n self.table.setRowHidden(row, True)\n continue\n\n # 匹配搜索条件或不输入时为True或结果列表,不设隐藏\n search_match = self._get_search_match(row, search_value)\n if not search_match:\n self.table.setRowHidden(row, True)\n continue\n\n # 仅当条件都匹配时才显示行\n self.table.setRowHidden(row, False)\n visible_rows += 1\n\n # 对单元格应用颜色\n if color_switch == 'ON' and isinstance(search_match, list):\n self.highlight_rows.append(self._generate_index_key(row))\n for column in search_match:\n self.table.apply_color(row, COLOR_HIGHLIGHT, column)\n\n # 更新状态栏信息展示过滤后的行数\n self.status_updated.emit(f\"{visible_rows} {self.lang['ui.filter_bar_11']}\")\n except Exception:\n logger.exception(\"Exception in filtering table\")\n self.status_updated.emit(self.lang['label_status_error'])\n finally:\n self.table.setUpdatesEnabled(True)\n\n def _get_app_match(self, name_data: str) -> bool:\n \"\"\"\n 检查当前行是否与选定的应用服务匹配。\n\n :param name_data: 行中的应用服务名称。\n :type name_data: str\n\n :return: 如果当前行与选定的应用服务匹配,则返回 True。\n :rtype: bool\n \"\"\"\n selected_app = self.filter_app_box.currentData()\n return True if selected_app == \"all\" or selected_app == name_data else False\n\n def _get_table_match(self,\n consistency_data: str,\n skip_data: str) -> bool:\n \"\"\"\n 根据表格状态过滤条件检查当前行是否匹配。\n\n :param consistency_data: 一致性状态数据。\n :type consistency_data: str\n :param skip_data: 跳过状态数据。\n :type skip_data: str\n\n :return: 如果当前行符合表格状态过滤条件,则返回 True。\n :rtype: bool\n \"\"\"\n selected_table = self.filter_table_box.currentData()\n reverse_checked = 
self.filter_table_check_box.isChecked()\n # 直接对比较结果赋值bool,相等则为True\n fully_match = consistency_data == \"fully\"\n partially_match = consistency_data == \"partially\"\n skip_match = skip_data == \"yes\"\n # 根据快速过滤条件,返回组合比较结果。\n if selected_table == \"fully\":\n return fully_match if not reverse_checked else not fully_match\n elif selected_table == \"partially\":\n return partially_match if not reverse_checked else not partially_match\n elif selected_table == 'skip':\n return skip_match if not reverse_checked else not skip_match\n elif selected_table == \"fully+skip\":\n return (fully_match or skip_match) if not reverse_checked else not (fully_match or skip_match)\n elif selected_table == \"fully+partially\":\n return (fully_match or partially_match) if not reverse_checked else not (fully_match or partially_match)\n elif selected_table == \"fully+partially+skip\":\n return (fully_match or partially_match or skip_match) if not reverse_checked else not (fully_match or partially_match or skip_match)\n else:\n return False if not reverse_checked else True\n\n def _get_search_match(self,\n row: int,\n search_value: str) -> Union[bool, List[int]]:\n \"\"\"\n 检查当前行是否与搜索条件匹配。\n\n :param row: 表格中的行号。\n :type row: int\n :param search_value: 需要搜索的值。\n :type search_value: str\n\n :return: 如果搜索值为空,则返回 True。否则返回空列表或匹配列号的列表\n :rtype: Union[bool, List[int]\n \"\"\"\n # 如果搜索值为空,则无需进行搜索\n if not search_value:\n return True\n # 禁止更新。主要着色时操作太多。\n self.table.setUpdatesEnabled(False)\n match_col = []\n # 遍历每列的内容\n for column in range(self.table.columnCount()):\n # 不搜索隐藏的列\n if self.table.isColumnHidden(column):\n continue\n\n # 获取单元格内容\n item = self.table.item(row, column)\n item_text = item.text().lower() if item else ''\n\n # 单元格列号插入到返回列表\n if search_value in item_text:\n match_col.append(column)\n\n # 启用更新\n self.table.setUpdatesEnabled(True)\n return match_col\n\n def _generate_index_key(self, row: int) -> str:\n \"\"\"\n 生成索引键。\n\n 此方法根据给定的行号生成一个唯一的索引键。索引键由行中特定列的值组合而成,用于标识表格中的唯一行。\n\n :param row: 表格中的行号。\n :type row: int\n\n :return: 生成的索引键。\n :rtype: str\n \"\"\"\n name = self.table.item(row, COL_INFO['name']['col']).text()\n group = self.table.item(row, COL_INFO['group']['col']).text()\n key = self.table.item(row, COL_INFO['key']['col']).text()\n return f\"{name}+{group}+{key}\"\n\n def _reset_styles(self) -> None:\n \"\"\"\n 重置表格样式到记录的状态。\n\n 此方法遍历表格中的所有行,对于每一行,恢复其单元格的背景颜色到初始状态。这通常在过滤条件发生变化或重置时调用。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n\n try:\n reset_rows = []\n # 还原字典为空则跳过\n if not self.highlight_rows:\n return\n # 遍历每行,但跳过隐藏行,因为隐藏行必然没有被更改颜色。\n # 反过来遍历还原字典并不可行,因为索引键并不储存在表格中,\n # 而且索引键和行号并不能形成牢固对应关系(行号可变),\n # 所以遍历所有行,但只操作匹配的单元格,最大程度减少对单元格的操作。\n for row in range(self.table.rowCount()):\n if self.table.isRowHidden(row):\n continue\n # 生成当行索引键,并检测是否在还原列表中。\n elif self._generate_index_key(row) in self.highlight_rows:\n # 索引键在还原字典中找到时,向reset_rows插入行数\n reset_rows.append(row)\n # 最后一次性还原单元格本来颜色。\n self.table.apply_color_to_table(reset_rows)\n # 完成后,清空还原列表。\n self.highlight_rows.clear()\n except Exception:\n logger.exception(\"Error occurred while resetting styles\")" }, { "identifier": "LangManager", "path": "ui/lang_manager.py", "snippet": "class LangManager(QObject):\n \"\"\"\n 语言管理类,用于管理和更新应用程序的语言字典。\n\n 此类继承自 QObject,可发出语言更新的信号。它通过 `get_lang_dict` 函数获取当前语言字典,并提供了更新语言的功能。\n\n :ivar _lang_dict: 当前使用的语言字典。\n :vartype _lang_dict: dict\n \"\"\"\n lang_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._lang_dict = get_lang_dict()\n\n def get_lang(self) -> Optional[Dict[str, 
str]]:\n \"\"\"\n 获取当前使用的语言字典的副本。\n\n :return: 当前语言字典的深拷贝。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return copy.deepcopy(self._lang_dict)\n except Exception:\n logger.exception(\"Failed to retrieve language dictionary.\")\n return None\n\n def update_lang(self, new_lang: str) -> None:\n \"\"\"\n 更新当前使用的语言字典。\n\n :param new_lang: 新语言的标识符。\n :type new_lang: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n self._lang_dict = LANG_DICTS.get(new_lang, \"English\")\n self.lang_updated.emit()\n logger.info(f\"Language changed to {new_lang}\")\n except Exception:\n logger.exception(f\"Failed to changed language to {new_lang}\")" }, { "identifier": "message_show", "path": "ui/message_show.py", "snippet": "def message_show(message_type: str,\n text: str) -> None:\n \"\"\"\n 显示指定类型的消息框。\n\n 根据提供的消息类型和文本内容,显示相应的消息框。支持的消息类型包括 'Critical'、'Warning' 和 'Information'。\n\n :param message_type: 消息类型,支持 'Critical'、'Warning' 和 'Information'。\n :type message_type: str\n :param text: 消息框中显示的文本内容。\n :type text: str\n :return: 无返回值。\n :rtype: None\n \"\"\"\n try:\n msg_box = QMessageBox()\n msg_box.setText(text)\n msg_box.setStandardButtons(QMessageBox.Ok)\n msg_box.setWindowTitle(message_type)\n\n if message_type == 'Critical':\n msg_box.setIcon(QMessageBox.Critical)\n msg_box.setWindowIcon(QIcon(get_resource_path('media/icons8-error-26')))\n elif message_type == 'Warning':\n msg_box.setIcon(QMessageBox.Warning)\n msg_box.setWindowIcon(QIcon(get_resource_path('media/icons8-do-not-disturb-26')))\n elif message_type == 'Information':\n msg_box.setIcon(QMessageBox.Information)\n msg_box.setWindowIcon(QIcon(get_resource_path('media/icons8-about-26')))\n else:\n logger.warning(\"Invalid message type provided.\")\n\n msg_box.exec_()\n except Exception:\n logger.exception(\"An error occurred while displaying the message box\")" }, { "identifier": "TableMain", "path": "ui/table_main.py", "snippet": "class TableMain(QTableWidget):\n \"\"\"\n 主表格类,用于展示和管理数据行。\n\n 此类继承自 PyQt5 的 QTableWidget,提供了丰富的数据展示和管理功能。包括但不限于数据的展示、行的颜色标记、右键菜单功能以及快捷键支持。\n 通过与 LangManager 和 ConfigManager 的集成,支持动态语言切换和配置管理。\n\n :param lang_manager: 用于管理界面语言的 LangManager 实例。\n :type lang_manager: LangManager\n :param config_manager: 用于管理配置的 ConfigManager 实例。\n :type config_manager: ConfigManager\n\n :author: assassing\n :contact: https://github.com/hxz393\n :copyright: Copyright 2023, hxz393. 
保留所有权利。\n \"\"\"\n status_updated = pyqtSignal(str)\n filter_updated = pyqtSignal(list)\n\n def __init__(self,\n lang_manager: LangManager,\n config_manager: ConfigManager):\n super().__init__()\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.config_manager = config_manager\n # 实例化用到的组件\n self.actionCopy = ActionCopy(self.lang_manager, self)\n self.actionSave = ActionSave(self.lang_manager, self)\n self.actionSkip = ActionSkip(self.lang_manager, self.config_manager, self)\n self.actionUnskip = ActionUnskip(self.lang_manager, self.config_manager, self)\n # 手动连接实例化的组件信号到转发函数\n self.actionCopy.status_updated.connect(self.forward_status)\n self.actionSave.status_updated.connect(self.forward_status)\n self.actionSkip.status_updated.connect(self.forward_status)\n self.actionSkip.filter_updated.connect(self.forward_filter)\n self.actionUnskip.status_updated.connect(self.forward_status)\n self.actionUnskip.filter_updated.connect(self.forward_filter)\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面。\n\n 此方法负责设置表格的基本属性,如列数、表头标签、选择行为等。还包括对特定列的隐藏和宽度调整策略的设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 先运行语言更新,里面有表头定义\n self.update_lang()\n self.hidden_cols = [\"pro_time\", \"pre_time\", \"test_time\", \"dev_time\"]\n self.resize_cols = [\"name\", \"group\", \"consistency\", \"skip\"]\n # 配置表格基本属性\n self.setColumnCount(len(self.column_headers))\n self.setHorizontalHeaderLabels(self.column_headers)\n self.setEditTriggers(QTableWidget.NoEditTriggers)\n self.setSelectionBehavior(QTableWidget.SelectItems)\n # 隐藏垂直表头\n self.verticalHeader().setVisible(False)\n # 启用自动换行,没生效\n self.setWordWrap(True)\n self.setTextElideMode(Qt.ElideNone)\n # 为表头视图设置上下文菜单事件\n self.horizontalHeader().setContextMenuPolicy(Qt.CustomContextMenu)\n self.horizontalHeader().customContextMenuRequested.connect(self._header_context_menu)\n # 为表单设置上下文菜单事件\n self.setContextMenuPolicy(Qt.CustomContextMenu)\n self.customContextMenuRequested.connect(self._cell_context_menu)\n # 隐藏指定列\n [self.hideColumn(COL_INFO[i]['col']) for i in self.hidden_cols]\n # 设置表宽度策略\n self.set_header_resize()\n\n def set_header_resize(self):\n \"\"\"\n 设置表头的列宽度和调整策略。\n\n 此方法负责定义表头列的宽度调整策略和其他相关属性。它设置了表头列的默认宽度、是否可拖动以及列的自动调整策略。\n 例如,某些列被设置为根据内容自动调整宽度,而其他列则被设置为可伸缩以适应表格的大小。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 设置默认列宽度,列宽调整策略,列可拖动\n self.horizontalHeader().setSectionsMovable(True)\n self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.horizontalHeader().setMinimumSectionSize(100)\n # 设置要自动调整宽度的列\n [self.horizontalHeader().setSectionResizeMode(COL_INFO[i]['col'], QHeaderView.ResizeToContents) for i in self.resize_cols]\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.column_headers = [\n self.lang['ui.table_main_1'],\n self.lang['ui.table_main_2'],\n self.lang['ui.table_main_3'],\n self.lang['ui.dialog_settings_connection_2'],\n f\"{self.lang['ui.dialog_settings_connection_2']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.dialog_settings_connection_3'],\n f\"{self.lang['ui.dialog_settings_connection_3']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.dialog_settings_connection_4'],\n f\"{self.lang['ui.dialog_settings_connection_4']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.dialog_settings_connection_5'],\n f\"{self.lang['ui.dialog_settings_connection_5']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.table_main_5'],\n self.lang['ui.table_main_6'],\n ]\n 
# 重新应用到表头\n self.setHorizontalHeaderLabels(self.column_headers)\n # 定义数据和显示映射的字典\n consistency_status_mapping = {\n \"inconsistent\": self.lang['ui.action_start_8'],\n \"fully\": self.lang['ui.action_start_9'],\n \"partially\": self.lang['ui.action_start_10'],\n \"unknown\": self.lang['ui.action_start_13'],\n }\n skip_status_mapping = {\n \"no\": self.lang['ui.action_start_11'],\n \"yes\": self.lang['ui.action_start_12'],\n \"unknown\": self.lang['ui.action_start_13'],\n }\n for row in range(self.rowCount()):\n # 更新忽略状态文字\n self._update_item_text(row, \"skip\", skip_status_mapping)\n # 更新一致性状态文字\n self._update_item_text(row, \"consistency\", consistency_status_mapping)\n\n def _update_item_text(self,\n row: int,\n user_data_key: str,\n text_mapping: Dict[str, str]) -> None:\n \"\"\"\n 根据提供的文本映射更新指定行的项文本。\n\n 此方法用于更新表格或列表中特定行的文本。它根据用户数据键(user_data_key)获取对应行的项,然后根据提供的文本映射(text_mapping)更新该项的文本。\n\n :param row: 要更新的行索引。\n :type row: int\n :param user_data_key: 用于获取项的用户数据键。\n :type user_data_key: str\n :param text_mapping: 用户数据到文本的映射字典。\n :type text_mapping: Dict[str, str]\n\n :return: 无返回值。\n :rtype: None\n \"\"\"\n item = self.item(row, COL_INFO[user_data_key]['col'])\n if item is not None:\n user_data = item.data(Qt.UserRole)\n if user_data in text_mapping:\n item.setText(text_mapping[user_data])\n\n def keyPressEvent(self, event: QKeyEvent) -> None:\n \"\"\"\n 处理键盘事件。\n\n 此方法用于处理键盘事件,特别是复制功能的快捷键。如果按下 Ctrl+C,则复制选中的单元格内容。\n\n :param event: 键盘事件对象。\n :type event: QKeyEvent\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n if event.key() == Qt.Key_C and (event.modifiers() & Qt.ControlModifier):\n self.actionCopy.action_copy()\n else:\n super().keyPressEvent(event)\n\n def _cell_context_menu(self, pos: QPoint) -> None:\n \"\"\"\n 实现表格单元格的右键菜单功能。\n\n :param pos: 右键点击的位置。\n :type pos: QPoint\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n menu = QMenu(self)\n menu.addAction(self.actionCopy.action_copy)\n separator = QAction(menu)\n separator.setSeparator(True)\n menu.addAction(separator)\n menu.addAction(self.actionSkip.action_skip)\n menu.addAction(self.actionUnskip.action_unskip)\n sep = QAction(menu)\n sep.setSeparator(True)\n menu.addAction(sep)\n menu.addAction(self.actionSave.action_save)\n menu.exec_(self.viewport().mapToGlobal(pos))\n\n def _header_context_menu(self, pos: QPoint) -> None:\n \"\"\"\n 实现表头的右键菜单功能。\n\n :param pos: 右键点击的位置。\n :type pos: QPoint\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n menu = QMenu(self)\n # 动态创建一个菜单项,用于隐藏/显示列\n for index in range(self.columnCount()):\n column_name = self.horizontalHeaderItem(index).text()\n action = menu.addAction(f\"{column_name}\")\n action.setCheckable(True)\n action.setChecked(not self.isColumnHidden(index))\n action.setData(index)\n action.triggered.connect(self._toggle_column_visibility)\n # 在鼠标右键点击位置显示菜单\n menu.exec_(self.horizontalHeader().viewport().mapToGlobal(pos))\n\n def _toggle_column_visibility(self) -> None:\n \"\"\"\n 根据用户选择,切换列的可见性。\n\n 此方法用于根据用户在上下文菜单中的选择,显示或隐藏特定的列。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n action = self.sender()\n if isinstance(action, QAction):\n column_index = action.data()\n if action.isChecked():\n self.showColumn(column_index)\n else:\n self.hideColumn(column_index)\n\n def add_row(self, data: List[List[str]]) -> None:\n \"\"\"\n 向表格中添加一行数据。\n\n :param data: 要添加的数据列表,每个元素是一个列表,第一个元素代表显示的字符串,第二个元素代表附加数据。\n :type data: List[List[str]]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n row_position = 0\n try:\n # 获取最后行数\n row_position = self.rowCount()\n # 插入最后一行\n self.insertRow(row_position)\n # 插入单元格数据\n 
self._fill_row_data(row_position, data)\n except Exception:\n logger.exception(f\"Error occurred while adding a new row at position {row_position}\")\n self.removeRow(row_position)\n\n def _fill_row_data(self,\n row_position: int,\n data: List[List[str]]) -> None:\n \"\"\"\n 填充指定行的数据。\n\n :param row_position: 行位置\n :param data: 行数据\n :type row_position: int\n :type data: List[List[str]]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n for column, (display_text, user_data) in enumerate(data):\n # 默认设置显示字符串,也叫 Qt.DisplayRole。获取方法item.text() 或 item.data(Qt.DisplayRole)\n item = QTableWidgetItem(str(display_text))\n # 设置实际数据,也叫 Qt.UserRole。获取方法 item.data(Qt.UserRole)\n item.setData(Qt.UserRole, user_data)\n # 设置单元格不可编辑状态\n item.setFlags(item.flags() & ~Qt.ItemIsEditable)\n # 正常表格插入方法\n self.setItem(row_position, column, item)\n\n @log_time\n def apply_color_to_table(self, rows: List[int] = None) -> None:\n \"\"\"\n 对整个表格进行着色。通常只有初始化时才不带rows参数,以应用到整表。\n\n :param rows: 可选,要应用颜色的行号列表。\n :type rows: List[int], optional\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n color_switch = self.config_manager.get_config_main().get('color_set', 'ON')\n if color_switch == 'OFF':\n return\n\n if rows is None or not isinstance(rows, list):\n rows = range(self.rowCount())\n\n try:\n for row in rows:\n # 不给隐藏行设置颜色\n if self.isRowHidden(row):\n continue\n\n self._process_row_for_color(row)\n except Exception:\n logger.exception(\"Exception in apply_color_to_table method\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def _process_row_for_color(self, row: int) -> None:\n \"\"\"\n 根据一致性、跳过状态和是否为空值给单行应用颜色。\n\n :param row: 行号,对每行进行颜色处理。\n :type row: int\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n consistency_data = self.item(row, COL_INFO['consistency']['col']).data(Qt.UserRole)\n skip_data = self.item(row, COL_INFO['skip']['col']).data(Qt.UserRole)\n # 忽略状态为是时设置颜色\n if skip_data == 'yes':\n self.apply_color(row, COLOR_SKIP)\n return\n\n # 根据一致性值设置颜色\n if consistency_data == 'fully':\n self.apply_color(row, COLOR_CONSISTENCY_FULLY)\n elif consistency_data == 'partially':\n self.apply_color(row, COLOR_CONSISTENCY_PARTIALLY)\n else:\n self.apply_color(row, COLOR_DEFAULT)\n\n # 遍历指定列检查空值,并赋予颜色\n for column in range(self.columnCount()):\n # 不给隐藏列设置颜色\n if not self.isColumnHidden(column):\n if self.item(row, column).text() == 'None':\n self.apply_color(row, COLOR_EMPTY, column)\n\n def apply_color(self,\n row: int,\n color: str,\n column: Optional[int] = None) -> None:\n \"\"\"\n 为指定的行或单元格应用颜色。\n\n :param row: 要着色的行索引。\n :type row: int\n :param color: 要应用的颜色。\n :type color: str\n :param column: 可选,指定要着色的列索引,如果未指定,则对整行应用颜色。\n :type column: int, optional\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n color_brush = QBrush(QColor(color))\n if column is not None:\n self.item(row, column).setBackground(color_brush)\n else:\n for col in range(self.columnCount()):\n # 不给隐藏列设置颜色\n if not self.isColumnHidden(col):\n self.item(row, col).setBackground(color_brush)\n except Exception:\n logger.exception(\"Error occurred while applying color to a cell\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def clear(self) -> None:\n \"\"\"\n 清空表格中的所有行。\n\n 此方法用于清除表格中的所有数据,通常在数据更新或重置时使用。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n # 禁用更新以提高性能\n self.setUpdatesEnabled(False)\n # 首先清除所有单元格的内容\n self.clearContents()\n # 将行数设置为0,从而删除所有行\n self.setRowCount(0)\n except Exception:\n logger.exception(\"Error occurred while clearing the table.\")\n self.status_updated.emit(self.lang['label_status_error'])\n 
finally:\n # 确保即使发生错误也要重新启用更新\n self.setUpdatesEnabled(True)\n\n def forward_status(self, message: str) -> None:\n \"\"\"\n 用于转发状态信号。\n\n :param message: 要转发的消息。\n :type message: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.status_updated.emit(message)\n\n def forward_filter(self, rows: List[int]) -> None:\n \"\"\"\n 用于转发过滤信号。\n\n :param rows: 要转发的行列表。\n :type rows: List[int]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.filter_updated.emit(rows)\n\n def get_table_data(self) -> Dict[int, Dict[str, str]]:\n \"\"\"\n 用于获取表格所有数据。\n\n :rtype: Dict[int, Dict[str, str]]\n :return: 返回嵌套字典。键为行号,值为字典,字典中键为列标题,值为内容。类似于:{882: {'服务': 'web', '分组': 'application'}, 883: {'服务': 'web', '分组': 'application'}}\n \"\"\"\n return {row: {self.horizontalHeaderItem(col).text(): self.item(row, col).data(Qt.UserRole)\n for col in range(self.columnCount())}\n for row in range(self.rowCount())}" } ]
import logging from typing import Dict, List from PyQt5.QtCore import Qt, QThread, pyqtSignal, QObject from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import QAction, QHeaderView from config.settings import COL_INFO from lib.get_resource_path import get_resource_path from module.execute_queries import execute_queries from ui.config_manager import ConfigManager from ui.filter_bar import FilterBar from ui.lang_manager import LangManager from ui.message_show import message_show from ui.table_main import TableMain
13,377
""" 提供应用程序的主要功能,包括用户界面初始化、数据库查询执行、数据展示和处理。 本模块中包含的类负责应用程序的主要操作流程,如用户界面的初始化、按钮动作的处理、后台数据查询、数据展示等。主要类包括`ActionStart`和`StartWork`,分别负责处理用户界面动作和执行后台工作。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class ActionStart(QObject): """ 负责处理用户界面动作,例如初始化界面、响应按钮点击等。 此类包含了界面的主要动作逻辑,如开始按钮的点击处理、用户界面语言的更新、表格的数据填充等。它与后台线程`StartWork`协作,实现数据的查询和展示。 :param lang_manager: 语言管理器,用于界面语言的加载和更新。 :param config_manager: 配置管理器,提供应用程序的配置信息。 :param table: 主表格界面,用于数据的展示。 :param filter_bar: 过滤条,用于数据的筛选。 :type lang_manager: LangManager :type config_manager: ConfigManager :type table: TableMain :type filter_bar: FilterBar """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager,
""" 提供应用程序的主要功能,包括用户界面初始化、数据库查询执行、数据展示和处理。 本模块中包含的类负责应用程序的主要操作流程,如用户界面的初始化、按钮动作的处理、后台数据查询、数据展示等。主要类包括`ActionStart`和`StartWork`,分别负责处理用户界面动作和执行后台工作。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class ActionStart(QObject): """ 负责处理用户界面动作,例如初始化界面、响应按钮点击等。 此类包含了界面的主要动作逻辑,如开始按钮的点击处理、用户界面语言的更新、表格的数据填充等。它与后台线程`StartWork`协作,实现数据的查询和展示。 :param lang_manager: 语言管理器,用于界面语言的加载和更新。 :param config_manager: 配置管理器,提供应用程序的配置信息。 :param table: 主表格界面,用于数据的展示。 :param filter_bar: 过滤条,用于数据的筛选。 :type lang_manager: LangManager :type config_manager: ConfigManager :type table: TableMain :type filter_bar: FilterBar """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager,
table: TableMain,
7
2023-11-07 01:02:38+00:00
16k
pytorch-labs/ao
test/test.py
[ { "identifier": "DynamicallyPerAxisQuantizedLinear", "path": "torchao/quantization/dynamic_quant.py", "snippet": "class DynamicallyPerAxisQuantizedLinear(torch.nn.Linear):\n \"\"\"\n This class is a replacement for `torch.nn.Linear`. It implements a\n quantized matmul using int8 dynamic symmetric per-token activation,\n and int8 symmetric per-channel weight quantization\n \"\"\"\n\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n ) -> None:\n super().__init__(in_features, out_features, bias)\n\n def forward(self, X: torch.Tensor, *args, **kwargs) -> torch.Tensor:\n \"\"\"\n Performs the forward pass of the quantized linear layer which consists\n of int8 dynamic symmetric per-token activation and int8 symmetric per-channel weight\n quantization\n\n Args:\n X (torch.Tensor): The input floating point tensor to the quantized linear layer.\n\n Returns:\n torch.Tensor: The output floating point tensor after the quantized matmul and rescale.\n\n \"\"\"\n\n Y = quant_int8_dynamic_per_token_linear(\n X, self.W_int_repr_t, self.W_scales, self.bias, X.dtype\n )\n return Y\n\n @classmethod\n def from_float(\n cls, mod: torch.nn.Linear\n ) -> \"DynamicallyPerAxisQuantizedLinear\":\n \"\"\"\n Converts a `mod` of class `torch.nn.Linear` to the\n `DynamicallyPerAxisQuantizedLinear` class\n\n Args:\n mod (torch.nn.Linear): The original `torch.nn.Linear` module to convert.\n\n Returns:\n DynamicallyPerAxisQuantizedLinear: The converted quantized linear module.\n\n \"\"\"\n\n # create the new module with a toy size to ensure initialization is fast\n fake_in_features, fake_out_features = 8, 8\n new_mod = cls(\n fake_in_features,\n fake_out_features,\n bias=mod.bias is not None,\n )\n new_mod.in_features = mod.in_features\n new_mod.out_features = mod.out_features\n W_int_repr, W_scales, _W_zps = dynamically_quantize_per_channel(\n mod.weight, -128, 127, torch.int8\n )\n new_mod.register_buffer(\"W_int_repr_t\", W_int_repr.contiguous().t())\n new_mod.W_scales = nn.Parameter(W_scales)\n new_mod.bias = mod.bias\n del new_mod.weight\n\n device_to_use = next(mod.parameters()).device\n new_mod.to(device_to_use)\n return new_mod" }, { "identifier": "apply_dynamic_quant", "path": "torchao/quantization/quant_api.py", "snippet": "def apply_dynamic_quant(model, filter_fn=None):\n \"\"\"\n Applies dynamic symmetric per-token activation and per-channel weight\n quantization to all linear layers in the given model using\n module swaps.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n lambda mod: DynamicallyPerAxisQuantizedLinear.from_float(mod),\n _is_linear if filter_fn is None else filter_fn,\n )" }, { "identifier": "apply_weight_only_int8_quant", "path": "torchao/quantization/quant_api.py", "snippet": "def apply_weight_only_int8_quant(model, filter_fn=None):\n \"\"\"\n Applies weight-only symmetric per-channel int8 quantization to all linear layers\n in the given model using module swaps.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n WeightOnlyInt8QuantLinear.from_float,\n _is_linear if filter_fn is None else filter_fn,\n )" }, { "identifier": "change_linear_weights_to_int8_dqtensors", "path": "torchao/quantization/quant_api.py", "snippet": "def change_linear_weights_to_int8_dqtensors(model, filter_fn=None):\n \"\"\"\n Converts all linear weight tensors to the `Int8DynamicallyQuantizedLinearWeight`\n Tensor subclass, effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n if 
filter_fn is None:\n filter_fn = (\n lambda *args:\n _is_linear(*args) and\n _in_features_greater_than_16(*args)\n )\n\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int8DynamicallyQuantizedLinearWeight),\n filter_fn\n )" }, { "identifier": "change_linear_weights_to_int8_woqtensors", "path": "torchao/quantization/quant_api.py", "snippet": "def change_linear_weights_to_int8_woqtensors(model, filter_fn=None):\n \"\"\"\n Converts all linear weight tensors to the\n `Int8WeightOnlyQuantizedLinearWeight` tensor subclass,\n effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int8WeightOnlyQuantizedLinearWeight),\n _is_linear if filter_fn is None else filter_fn,\n )" }, { "identifier": "change_linear_weights_to_int4_woqtensors", "path": "torchao/quantization/quant_api.py", "snippet": "def change_linear_weights_to_int4_woqtensors(model, **kwargs):\n \"\"\"\n Converts all linear weight tensors to the\n `Int4WeightOnlyQuantizedLinearWeight` tensor subclass,\n effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n filter_fn = kwargs.pop(\"filter_fn\", _is_linear)\n\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int4WeightOnlyQuantizedLinearWeight, **kwargs),\n filter_fn,\n )" }, { "identifier": "_replace_with_custom_fn_if_matches_filter", "path": "torchao/quantization/quant_api.py", "snippet": "def _replace_with_custom_fn_if_matches_filter(\n model, replacement_fn, filter_fn, cur_fqn=\"\"\n) -> None:\n \"\"\"\n For each `child` in `model`, replaces it with `replacement_fn(child)`\n if `filter_fn(child)` is `True`\n \"\"\"\n if filter_fn(model, cur_fqn[:-1]):\n model = replacement_fn(model)\n return model\n else:\n for name, child in model.named_children():\n new_child = _replace_with_custom_fn_if_matches_filter(\n child, replacement_fn, filter_fn, f\"{cur_fqn}{name}.\"\n )\n if new_child is not child:\n setattr(model, name, new_child)\n return model" }, { "identifier": "dequantize_per_channel", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dequantize_per_channel(int_repr, scales, zero_points, out_dtype=torch.float32):\n # assumes axis is 0\n y = int_repr.transpose(0, 1)\n y = y.to(out_dtype)\n y = y - zero_points\n y = y * scales\n y = y.transpose(0, 1)\n return y" }, { "identifier": "dequantize_per_tensor", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dequantize_per_tensor(int_repr, scale, zero_point, out_dtype=torch.float32):\n y = int_repr.to(out_dtype)\n if zero_point is not None:\n y -= zero_point\n return y * scale" }, { "identifier": "dynamically_quantize_per_channel", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype):\n # assumes symmetric quantization\n # assumes axis == 0\n # assumes dense memory format\n # TODO(future): relax ^ as needed\n\n # default setup for affine quantization of activations\n eps = torch.finfo(torch.float32).eps\n\n # get min and max\n min_val, max_val = torch.aminmax(x, dim=1)\n\n # calculate scale and zero point based on min and max\n # reference: https://fburl.com/code/srbiybme\n min_val_neg = torch.min(min_val, torch.zeros_like(min_val))\n max_val_pos = torch.max(max_val, torch.zeros_like(max_val))\n device = min_val_neg.device\n\n # reference: 
https://fburl.com/code/4wll53rk\n max_val_pos = torch.max(-min_val_neg, max_val_pos)\n scale = max_val_pos / (float(quant_max - quant_min) / 2)\n # ensure scale is the same dtype as the original tensor\n scale = torch.clamp(scale, min=eps).to(x.dtype)\n zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)\n\n # quantize based on qmin/qmax/scale/zp\n # reference: torch/ao/quantization/fx/_decomposed.py?lines=63\n x_div = x.transpose(0, 1) / scale\n x_round = torch.round(x_div)\n x_zp = x_round + zero_point\n x_zp = x_zp.transpose(0, 1)\n quant = torch.clamp(x_zp, quant_min, quant_max).to(target_dtype)\n\n return quant, scale, zero_point" }, { "identifier": "dynamically_quantize_per_tensor", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dynamically_quantize_per_tensor(\n x,\n quant_min,\n quant_max,\n target_dtype,\n qscheme=torch.per_tensor_affine, # for now, reuse existing qscheme enum\n):\n # assumes affine quantization\n\n # default setup for affine quantization of activations\n eps = torch.finfo(torch.float32).eps\n\n if qscheme == torch.per_tensor_affine:\n # get min and max\n # TODO(future): make torch.aminmax work on cpu-half\n # min_val, max_val = torch.aminmax(x)\n min_val = torch.min(x)\n max_val = torch.max(x)\n\n # calculate scale and zero point based on min and max\n # reference: https://fburl.com/code/srbiybme\n min_val_neg = torch.min(min_val, torch.zeros_like(min_val))\n max_val_pos = torch.max(max_val, torch.zeros_like(max_val))\n device = min_val_neg.device\n\n scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min)\n # TODO(future): make torch.clamp with scalar work on cpu-half\n scale = torch.clamp(scale, min=eps).reshape(1)\n zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int)\n zero_point = torch.clamp(zero_point, quant_min, quant_max)\n\n # quantize based on qmin/qmax/scale/zp\n # reference: torch/ao/quantization/fx/_decomposed.py?lines=63\n quant = torch.clamp(\n torch.round(x / scale) + zero_point, quant_min, quant_max\n ).to(target_dtype)\n\n else:\n assert qscheme == torch.per_tensor_symmetric, f\"unsupported qscheme {qscheme}\"\n # assert quant_min == -1 * quant_max, \"unsupported quant_min/quant_max\"\n amax = torch.max(torch.abs(x))\n scale = amax / (float(quant_max - quant_min) / 2)\n scale = torch.clamp(scale, min=eps).reshape(1)\n quant = torch.clamp(torch.round(x / scale), quant_min, quant_max).to(\n target_dtype\n )\n # do not create a tensor for zero_point as this is expensive\n zero_point = None\n\n return quant, scale, zero_point" }, { "identifier": "quant_int8_dynamic_linear", "path": "torchao/quantization/quant_primitives.py", "snippet": "def quant_int8_dynamic_linear(\n x,\n x_quant_min,\n x_quant_max,\n x_q_dtype,\n w_vals_int8_t,\n w_scales,\n w_vals_int8_t_sums_int64,\n bias,\n out_dtype=torch.float32,\n):\n # like F.linear, but with int8 dynamic quantization of activation,\n # and a quantized weight\n x_vals_int8, x_scale, x_zp = dynamically_quantize_per_tensor(\n x, x_quant_min, x_quant_max, x_q_dtype\n )\n # w_vals_int8_t_sums_int64 = w_vals_int8_t.sum(dim=0)\n mm_out = quant_int8_matmul(\n x_vals_int8,\n x_scale,\n x_zp,\n w_vals_int8_t,\n w_vals_int8_t_sums_int64,\n w_scales,\n out_dtype,\n )\n if bias is not None:\n mm_out += bias\n return mm_out" }, { "identifier": "quant_int8_dynamic_per_token_linear", "path": "torchao/quantization/quant_primitives.py", "snippet": "def quant_int8_dynamic_per_token_linear(\n x,\n w_vals_int8_t,\n w_scales,\n bias,\n 
out_dtype,\n):\n # like F.linear, but with int8 dynamic quantization of activation,\n # and a quantized weight\n x_vals_int8, x_scales = quantize_activation_per_token_absmax(x)\n mm_out = quant_int8_per_token_matmul(\n x_vals_int8, x_scales, w_vals_int8_t, w_scales, out_dtype\n )\n if bias is not None:\n mm_out += bias\n return mm_out" }, { "identifier": "quantize_activation_per_token_absmax", "path": "torchao/quantization/quant_primitives.py", "snippet": "def quantize_activation_per_token_absmax(t):\n n_bits = 8\n # if the shape of t is [B, N, K], the shape of scales will be [B, N, 1]\n\n scales = t.abs().amax(dim=-1, keepdim=True)\n if scales.dtype == torch.float16:\n scales = (\n scales.float()\n ) # want float scales to avoid overflows for fp16, (bf16 has wide enough range)\n q_max = 2 ** (n_bits - 1) - 1\n scales = scales.clamp(min=1e-5).div(q_max)\n # Note: the original smoothquant does not clamp to qmin/qmax here,\n # but some of the tests with bfloat16 ended up with a flipped sign\n # if we don't clamp. TODO(future) look into this further.\n t = torch.round(t / scales).clamp(-127, 127).to(torch.int8)\n return t, scales" }, { "identifier": "safe_int_mm", "path": "torchao/quantization/quant_primitives.py", "snippet": "def safe_int_mm(input: torch.Tensor, mat2: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n This function wraps torch._int_mm and avoids several undesirable behaviors of the function for certain inputs while still\n returning correct results and being torch.compiled in a performant way.\n\n Assumes both tensors have dimension of 2.\n\n Note: no error checking for torch.compiled path, if input.shape = [i, j] and j<=16 then the triton kernel\n will error.\n\n Args:\n input (Tensor, int8): the first tensor to be multiplied\n mat2 (Tensor, int8): the second tensor to be multiplied\n\n Return:\n out (Tensor, int32): the result of the matmul with device matching that of the inputs\n \"\"\"\n\n # torch.compile path\n if dynamo_is_compiling() or \"FakeTensor\" in input.__repr__():\n return out_dtype(torch.ops.aten.mm.default, torch.int32, input, mat2)\n\n # error checking for cublas path\n assert (\n mat2.device == input.device\n ), f\"need both tensors to be on the same device but got {mat2.device} and {input.device}\"\n device_cpu = \"cpu\" in [mat2.device.type, input.device.type]\n # with input.shape = [i,j] and mat2.shape = [j,k]\n i_is_strictly_greater_than_16 = input.shape[0] > 16\n j_is_nonzero_multiple_of_8 = (input.shape[1] % 8 == 0) and (input.shape[1] > 0)\n k_is_nonzero_multiple_of_8 = (mat2.shape[1] % 8 == 0) and (mat2.shape[1] > 0)\n bad_dimensions_for_cublas = not (\n i_is_strictly_greater_than_16\n and j_is_nonzero_multiple_of_8\n and k_is_nonzero_multiple_of_8\n )\n\n if device_cpu or bad_dimensions_for_cublas:\n # fallback path\n return torch.matmul(input.cpu().to(torch.int32), mat2.cpu().to(torch.int32)).to(\n input.device.type\n )\n\n # cublas paths\n if not mat2.is_contiguous(): # silently gives incorrect result without this\n mat2 = mat2.contiguous()\n if (not input.is_contiguous()) and (\n input.shape[0] % 8 != 0\n ): # gives cryptic error without this\n input = (\n input.contiguous()\n ) # (it seems the transpose makes cublas check the above j constraint on i)\n return out_dtype(torch.ops.aten.mm.default, torch.int32, input, mat2)" }, { "identifier": "get_scale", "path": "torchao/quantization/smoothquant.py", "snippet": "def get_scale(X_absmax, W_absmax, alpha=0.5):\n \"\"\"\n Calculate the scale based on abs(max(X)), abs(max(W)) and alpha\n If X is of 
dimension `b*n*k` and W is dimension `k*m`, the returned\n scale is of dimension `k`.\n Note: X_absmax is calculated outside of this function because we\n need to keep a running version of it during calibration. W_absmax\n is calculated outside of this function for consistency with X_absmax.\n \"\"\"\n X_pow = torch.pow(X_absmax, alpha)\n W_pow = torch.pow(W_absmax, 1.0 - alpha)\n div = X_pow / W_pow\n return div.reshape(-1)" }, { "identifier": "smooth_fq_linear_to_inference", "path": "torchao/quantization/smoothquant.py", "snippet": "def smooth_fq_linear_to_inference(model, debug_skip_calibration=False) -> None:\n for _, mod in model.named_modules():\n if isinstance(mod, tuple(source_cls_to_target_cls.values())):\n if debug_skip_calibration:\n mod.set_debug_x_absmax()\n mod.to_inference()" }, { "identifier": "SmoothFakeDynamicallyQuantizedLinear", "path": "torchao/quantization/smoothquant.py", "snippet": "class SmoothFakeDynamicallyQuantizedLinear(SmoothFakeDynQuantMixin, torch.nn.Linear):\n \"\"\"\n This is a replacement for `torch.nn.Linear` which implements dynamic per-token\n activation quantization and dynamic per-channel weight quantization based on\n Smoothquant scaling.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n alpha = kwargs.pop(\"alpha\")\n super().__init__(*args, **kwargs)\n self.init_smoothquant_variables(alpha)\n\n def forward(self, X, *args, **kwargs):\n if self.calibrating:\n self.update_x_running_abs_max(X)\n Y = F.linear(X, self.weight, self.bias)\n else:\n if not self.debug_skip_scaling:\n # Ideally this would be fused into preceding layers\n # but in practice torch.compile fuses it with other\n # ops so the slowdown is minimal\n X = X / self.smooth_scale\n W_int_repr_t = (\n self.W_int_repr if self.store_w_int_repr_t else self.W_int_repr.t()\n )\n Y = quant_int8_dynamic_per_token_linear(\n X, W_int_repr_t, self.W_scales, self.bias, X.dtype\n )\n return Y\n\n @classmethod\n def from_float(cls, mod, alpha=0.5):\n \"\"\"\n Converts a `mod` of class `torch.nn.Linear` to the smooth fake quantized\n version of it. 
Note: requires calibration.\n \"\"\"\n # create the new module with a toy size to ensure initialization is fast\n fake_in_features, fake_out_features = 8, 8\n new_mod = cls(\n fake_in_features, fake_out_features, bias=mod.bias is not None, alpha=alpha\n )\n new_mod.in_features = mod.in_features\n new_mod.out_features = mod.out_features\n new_mod.weight = mod.weight\n new_mod.bias = mod.bias\n # TODO: test when creation is on cuda\n device_to_use = next(mod.parameters()).device\n new_mod.to(device_to_use)\n return new_mod\n\n def to_inference(self):\n \"\"\"\n Calculates the smoothquant scale based on calibration\n in preparation for inference\n \"\"\"\n assert self.x_running_abs_max is not None, \"no calibration data found\"\n self.calibrating = False\n self.smooth_scale = get_scale(\n self.x_running_abs_max,\n torch.max(torch.abs(self.weight.transpose(0, 1)), dim=1).values,\n alpha=self.alpha,\n )\n self.fold_weight()\n\n def set_debug_x_absmax(self):\n w_absmax = torch.max(torch.abs(self.weight.transpose(0, 1)), dim=1).values\n self.x_running_abs_max = w_absmax" }, { "identifier": "swap_linear_with_smooth_fq_linear", "path": "torchao/quantization/smoothquant.py", "snippet": "def swap_linear_with_smooth_fq_linear(\n model, skip_fqn_list=None, cur_fqn=\"\", alpha=0.5\n) -> None:\n\n name_to_child = dict(model.named_children())\n for name, child in name_to_child.items():\n if cur_fqn == \"\":\n new_fqn = name\n else:\n new_fqn = f\"{cur_fqn}.{name}\"\n if ((skip_fqn_list is None) or (new_fqn not in skip_fqn_list)) and (\n type(child) in source_cls_to_target_cls.keys()\n ):\n target_cls = source_cls_to_target_cls[type(child)]\n new_child = target_cls.from_float(child, alpha=alpha)\n setattr(model, name, new_child)\n else:\n swap_linear_with_smooth_fq_linear(child, skip_fqn_list, new_fqn, alpha)" }, { "identifier": "Int8DynamicallyQuantizedLinearWeight", "path": "torchao/quantization/subclass.py", "snippet": "class Int8DynamicallyQuantizedLinearWeight(QuantizedLinearWeightBase):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module, changes the\n linear op to a dynamically quantized linear op with symmetric per-token and per-channel\n quantization on the activation and weight respectively.\n \"\"\"\n\n @staticmethod\n def __new__(cls, int_data, q_scales, transposed, shape, **kwargs):\n kwargs[\"dtype\"] = kwargs.get(\"dtype\", q_scales.dtype)\n return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]\n\n def __init__(self, int_data, q_scales, transposed, shape, **kwargs):\n self.q_scales = q_scales\n super().__init__(int_data, transposed)\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n return quant_int8_dynamic_per_token_linear(\n act_mat, w_qtensor.int_data, w_qtensor.q_scales, bias, act_mat.dtype\n )\n\n def dequantize(self, dtype=None):\n \"\"\"\n Obtain the dequantized version of the quantized tensor subclass\n \"\"\"\n dq_t = dequantize_per_channel(\n self.int_data.t(), self.q_scales, 0, self.dtype if dtype is None else dtype\n ).to(self.dtype)\n # data was transposed to dequantize so make sure shape is correct\n return dq_t if not self.transposed else dq_t.t()\n\n def int_repr(self):\n \"\"\"\n Get the internal integer representation of the quantized tensor\n \"\"\"\n return self.int_data if self.transposed else self.int_data.t()\n\n def q_params(self):\n \"\"\"\n Get the quantization scales for the quantized tensor\n \"\"\"\n return {\"q_scales\": self.q_scales}\n\n def to(self, *args, 
**kwargs):\n kwargs = self._get_to_kwargs(*args, **kwargs)\n return self.__class__(\n self.int_data.to(kwargs[\"device\"]),\n self.q_scales.to(kwargs[\"device\"]),\n self.transposed,\n self.shape,\n **kwargs,\n )\n\n def _apply_fn_to_data(self, fn):\n return self.__class__(\n fn(self.int_data), fn(self.q_scales), self.transposed, self.shape, dtype=self.dtype\n )\n\n def _change_shape(self, shape):\n return self.__class__(\n self.int_data, self.q_scales, self.transposed, shape, dtype=self.dtype\n )\n\n def __tensor_flatten__(self):\n return [\"int_data\", \"q_scales\"], [self.transposed, self.dtype, self.shape]\n\n @classmethod\n def __tensor_unflatten__(cls, tensor_data_dict, tensor_attributes, outer_size=None, outer_stride=None):\n int_data, q_scales = tensor_data_dict[\"int_data\"], tensor_data_dict[\"q_scales\"]\n transposed, dtype, shape = tensor_attributes\n return cls(int_data, q_scales, transposed, shape if outer_size is None else outer_size, dtype=dtype, strides=outer_stride)\n\n @classmethod\n def from_float(cls, input_float, qmin=-128, qmax=127):\n \"\"\"\n Method used to convert a linear weight tensor to an instance of the\n Int8DynamicallyQuantizedLinearWeight subclass.\n\n Example usage::\n\n model.lin_mod.weight = (\n Int8DynamicallyQuantizedLinearWeight.from_float(model.lin_mod.weight)\n )\n \"\"\"\n w_int_repr, w_scales, _ = dynamically_quantize_per_channel(\n input_float, qmin, qmax, torch.int8\n )\n # the desired representation shape for fast quantized matmul is\n # transposed compared to how it's stored as a linear weight,\n # i.e. we want in_channels as dim=0 and out_channels (and quantized axis) as dim=1\n # however the external representation of our tensor will maintain the correct\n # shape attribute which needs to be tracked directly.\n int_data = w_int_repr.contiguous().t()\n if cls is not Int8DynamicallyQuantizedLinearWeight:\n int_data = int_data.contiguous()\n return cls(\n int_data, w_scales, False, input_float.shape, dtype=input_float.dtype\n )" }, { "identifier": "Int8WeightOnlyQuantizedLinearWeight", "path": "torchao/quantization/subclass.py", "snippet": "class Int8WeightOnlyQuantizedLinearWeight(Int8DynamicallyQuantizedLinearWeight):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module,\n changes the linear op to a weight-only quantized linear op with symmetric\n per-channel quantization on the weight.\n \"\"\"\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n orig_dtype = act_mat.dtype\n y = torch.mm(act_mat.reshape(-1, act_mat.shape[-1]), w_qtensor.int_data.to(act_mat.dtype)) * w_qtensor.q_scales\n y = y.reshape(*act_mat.shape[:-1], y.shape[-1])\n if bias is not None:\n y += bias\n return y.to(orig_dtype)" }, { "identifier": "Int4WeightOnlyQuantizedLinearWeight", "path": "torchao/quantization/subclass.py", "snippet": "class Int4WeightOnlyQuantizedLinearWeight(QuantizedLinearWeightBase):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module,\n changes that linear op to a weight-only int4 quantized linear op with groupwise\n affine quantization on the weight.\n \"\"\"\n\n @staticmethod\n def __new__(\n cls,\n int_data,\n scales_and_zeros,\n transposed,\n shape,\n groupsize=128,\n inner_k_tiles=8,\n **kwargs,\n ):\n kwargs[\"dtype\"] = kwargs.get(\"dtype\", scales_and_zeros.dtype)\n return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]\n\n def __init__(\n self,\n int_data,\n scales_and_zeros,\n transposed,\n shape,\n groupsize,\n 
inner_k_tiles,\n **kwargs,\n ):\n # the transposed flag tracks whether the tensor subclass has been transposed relative\n # to how a weight is normally stored in a linear i.e. [out_features, in_features].\n # tracking both transposed and shape is slightly redundant but corner cases like\n # square matrices can cause issues otherwise\n self.scales_and_zeros = scales_and_zeros\n self.groupsize = groupsize\n self.inner_k_tiles = inner_k_tiles\n super().__init__(int_data, transposed)\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n orig_act_size = act_mat.size()\n orig_dtype = act_mat.dtype\n\n # reshape and pad activation\n act_mat = act_mat.reshape(-1, act_mat.shape[-1]).to(torch.bfloat16)\n pad_size = find_multiple(act_mat.shape[-1], 1024)\n act_mat = torch.nn.functional.pad(act_mat, (0, pad_size - act_mat.shape[-1]))\n\n # matmul\n y = aten._weight_int4pack_mm(\n act_mat.contiguous(), w_qtensor.int_data, w_qtensor.groupsize, w_qtensor.scales_and_zeros\n )\n\n # remove out_feature padding\n orig_out_features = w_qtensor.shape[-1] if w_qtensor.transposed else w_qtensor.shape[-2]\n y = y[:, :orig_out_features]\n\n y = y.reshape(*orig_act_size[:-1], orig_out_features)\n if bias is not None:\n y += bias\n return y.to(orig_dtype)\n\n def dequantize(self):\n eye_shape = self.shape[1] if not self.transposed else self.shape[0]\n w_dq = self._quantized_op(\n torch.eye(eye_shape, device=self.device, dtype=self.dtype), self, None\n )\n # we dequantized using linear with the identity matrix, output has shape [in_channels, out_channels]\n # so we need to transpose back to get the original shape unless self.transposed is set.\n w_dq = w_dq if self.transposed else w_dq.t()\n return w_dq.to(self.dtype)\n\n def int_repr(self):\n return self.int_data\n\n def q_params(self):\n scales, zero_points = unpack_tinygemm_scales_and_zeros(\n self.scales_and_zeros,\n )\n return {\"q_scales\": scales, \"q_zero_points\": zero_points}\n\n def to(self, *args, **kwargs):\n kwargs = self._get_to_kwargs(*args, **kwargs)\n return self.__class__(\n self.int_data.to(kwargs[\"device\"]),\n self.scales_and_zeros.to(kwargs[\"device\"]),\n self.transposed,\n self.shape,\n self.groupsize,\n self.inner_k_tiles,\n **kwargs,\n )\n\n def _apply_fn_to_data(self, fn):\n return self.__class__(\n fn(self.int_data),\n fn(self.scales_and_zeros),\n self.transposed,\n self.shape,\n self.groupsize,\n self.inner_k_tiles,\n dtype=self.dtype,\n )\n\n def _change_shape(self, shape):\n return self.__class__(\n self.int_data,\n self.scales_and_zeros,\n self.transposed,\n shape,\n self.groupsize,\n self.inner_k_tiles,\n dtype=self.dtype\n )\n\n def __tensor_flatten__(self):\n return [\"int_data\", \"scales_and_zeros\"], (\n self.transposed,\n self.groupsize,\n self.inner_k_tiles,\n self.dtype,\n self.shape\n )\n\n @classmethod\n def __tensor_unflatten__(cls, tensor_data_dict, attributes, outer_size=None, outer_stride=None):\n int_data, scales_and_zeros = (\n tensor_data_dict[\"int_data\"],\n tensor_data_dict[\"scales_and_zeros\"],\n )\n transposed, groupsize, inner_k_tiles, dtype, shape = attributes\n return cls(\n int_data,\n scales_and_zeros,\n transposed,\n shape if outer_size is None else outer_size,\n groupsize,\n inner_k_tiles,\n dtype=dtype,\n strides=outer_stride,\n )\n\n @classmethod\n def from_float(cls, input_float, groupsize=128, inner_k_tiles=8):\n \"\"\"\n Method used to convert a linear weight tensor to an instance of the\n Int4WeightOnlyQuantizedLinearWeight subclass.\n\n Example usage::\n\n model.lin_mod.weight = (\n 
Int4WeightOnlyQuantizedLinearWeight.from_float(model.lin_mod.weight)\n )\n \"\"\"\n assert groupsize in [256, 128, 64, 32]\n assert inner_k_tiles in [8, 4, 2]\n orig_shape = input_float.shape\n orig_out_features, orig_in_features = input_float.shape\n\n # padding\n in_features = find_multiple(orig_in_features, 1024)\n out_features = find_multiple(orig_out_features, 8)\n input_float = torch.nn.functional.pad(\n input_float, (0, in_features - orig_in_features, 0, out_features - orig_out_features)\n )\n\n # quantization and packing\n input_int4x8, scales_and_zeros = groupwise_affine_quantize_tensor(\n input_float, 4, groupsize\n )\n int_data = aten._convert_weight_to_int4pack(\n input_int4x8, inner_k_tiles\n )\n\n return cls(\n int_data,\n scales_and_zeros,\n False,\n orig_shape,\n groupsize,\n inner_k_tiles,\n dtype=input_float.dtype,\n )" }, { "identifier": "_apply_logging_hook", "path": "torchao/quantization/utils.py", "snippet": "def find_multiple(n: int, k: int) -> int:\ndef compute_error(x, y):\ndef _get_logging_hook(fqn):\n def forward_hook(module, input):\ndef _apply_logging_hook(model):\n def __torch_dispatch__(self, func, types, args=(), kwargs=None):\ndef get_model_size_in_bytes(model):\nclass LoggingTensorMode(TorchDispatchMode):" } ]
import copy import unittest import torch import torch.nn as nn import os from torch._inductor.utils import run_and_get_code from torch._dynamo import config from torch.ao.quantization import MinMaxObserver, QConfigMapping from torchao.quantization.dynamic_quant import ( DynamicallyPerAxisQuantizedLinear, ) from torchao.quantization.quant_api import ( apply_dynamic_quant, apply_weight_only_int8_quant, change_linear_weights_to_int8_dqtensors, change_linear_weights_to_int8_woqtensors, change_linear_weights_to_int4_woqtensors, _replace_with_custom_fn_if_matches_filter, ) from torchao.quantization.quant_primitives import ( dequantize_per_channel, dequantize_per_tensor, dynamically_quantize_per_channel, dynamically_quantize_per_tensor, quant_int8_dynamic_linear, quant_int8_dynamic_per_token_linear, quantize_activation_per_token_absmax, safe_int_mm, ) from torchao.quantization.smoothquant import ( get_scale, smooth_fq_linear_to_inference, SmoothFakeDynamicallyQuantizedLinear, swap_linear_with_smooth_fq_linear, ) from torchao.quantization.subclass import ( Int8DynamicallyQuantizedLinearWeight, Int8WeightOnlyQuantizedLinearWeight, Int4WeightOnlyQuantizedLinearWeight ) from torchao.quantization.utils import ( _apply_logging_hook, compute_error, compute_error as SQNR, _fqn_to_op_to_shape_to_count, LoggingTensorMode, ) from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx from transformers import ( # type: ignore[import-untyped] DistilBertModel, DistilBertTokenizer, )
11,332
qconfig_mapping = QConfigMapping().set_global(qconfig) lin_ref_p = prepare_fx(lin_ref, qconfig_mapping, (torch.randn(1, 1),)) lin_ref_q = convert_to_reference_fx(lin_ref_p) y_q_ref = lin_ref_q(x.float()) # scale, zp of weight (get from reference model) w_obs = qconfig.weight() w_obs(weight) lin_ref_w_scale, lin_ref_w_zp = w_obs.calculate_qparams() lin_ref_w_scale = lin_ref_w_scale.to(device).to(float_dtype) # print('lin_ref_w', 'scale', lin_ref_w_scale, 'zp', lin_ref_w_zp) w_vals, _s, _z = dynamically_quantize_per_channel( getattr(lin_ref_q, "0").weight.to(float_dtype), -128, 127, torch.int8 ) w_vals = w_vals.t().contiguous() w_vals_sums = w_vals.sum(dim=0) # do our version of the quantized linear operator y = quant_int8_dynamic_linear( x, qmin, qmax, int_dtype, w_vals, lin_ref_w_scale, w_vals_sums, bias, float_dtype, ) # print('y', y) # print('y_q_ref', y_q_ref) # print('y_ref', y_ref) sqnr_ref = compute_error(y_ref, y_q_ref) sqnr_our = compute_error(y_ref, y) # print('sqnr_ref', sqnr_ref, 'sqnr_our', sqnr_our) # for large shapes, sqnr can be in the high 30s for float32 and float16 self.assertTrue(sqnr_our.item() >= 37.5) def test_qlinear_per_channel_numerics_cpu(self): # Note: the AO codebase doesn't easily support qint8 activations, # so the test cases below are for the quant primitives defined in # this file only. The AO reference is using quint8 here. test_cases = ( ((2, 3), (3, 4), 0, 255, torch.uint8, torch.quint8, torch.float32, "cpu"), ((2, 3), (3, 4), -128, 127, torch.int8, torch.qint8, torch.float32, "cpu"), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) def test_qlinear_per_channel_numerics_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( # Note: torch._int_mm needs int8 activations, so we don't test uint8 # activations on CUDA at all ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), # a large shape from LLaMa 1.5B - currently fails for float16 ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) class TestSubclass(unittest.TestCase): def _test_dequantize_impl( self, test_subclass_from_float, min_sqnr=35, test_dtype=torch.bfloat16, test_shape=(32, 64, 64), ): m, k, n = test_shape lin = torch.nn.Linear(k, n, device="cuda").to(test_dtype) w = lin.weight.detach() lin.weight = torch.nn.Parameter( test_subclass_from_float(lin.weight), requires_grad=False ) self.assertGreater(
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # mypy: ignore-errors torch.manual_seed(0) config.cache_size_limit = 100 class SmoothquantUnitTest(unittest.TestCase): # first, let's reproduce the graphic from the paper, Figure 4, to ensure # we are calculating the scales correctly def test_figure_4(self): X = torch.FloatTensor([1, -16, 2, 6, -2, 8, -1, -9]).reshape(1, 2, 4) W = torch.FloatTensor([2, 1, -2, 1, -1, -1, 2, -1, -2, -1, -1, 1]).reshape(4, 3) X_mul_W = torch.matmul(X, W) smoothquant_scale = get_scale( torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1), alpha=0.5, ) # reproduce scaled calculation X_scaled = X / smoothquant_scale.reshape(1, 1, -1) W_scaled = torch.matmul(torch.diag(smoothquant_scale), W) X_scaled_mul_scaled_W = torch.matmul(X_scaled, W_scaled) assert torch.allclose(X_mul_W, X_scaled_mul_scaled_W), "not close!" assert X_mul_W.shape == X_scaled_mul_scaled_W.shape # next, run the above test on a sample of representative inputs def test_tensors(self): x_shape = (1, 5, 7) w_shape = (7, 9) for i in range(3): X = torch.randn(x_shape) * 10 W = torch.randn(w_shape) s = get_scale( torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1), alpha=0.5, ) Y = torch.matmul(X, W) Y_ref = torch.matmul( X / s.reshape(1, 1, -1), torch.matmul(torch.diag(s), W), ) assert torch.allclose(Y, Y_ref, atol=1e-3, rtol=1e-3), "not close!" def _test_smooth_linear_impl(self, x_shape, lin_shape, device): # so we can use the full range torch.backends.quantized.engine = "qnnpack" x = torch.randn(*x_shape, device=device) * 9 + 10 lin_fp32 = nn.Linear(*lin_shape, device=device) # misc: ignore lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float( copy.deepcopy(lin_fp32), alpha=0.25 ) lin_smooth_skip_scaling = SmoothFakeDynamicallyQuantizedLinear.from_float( copy.deepcopy(lin_fp32), alpha=0.25 ) lin_fp32_copy = copy.deepcopy(lin_fp32) # assignment: ignore lin_fp32_copy.qconfig = torch.ao.quantization.QConfig( # assignment: ignore activation=None, weight=torch.ao.quantization.default_per_channel_weight_observer, ) lin_dynamic_q = torch.ao.nn.quantized.dynamic.Linear.from_float( lin_fp32_copy.cpu() ) y_ref = lin_fp32(x) # calibrate the smoothquant versions y_smooth_nocalib = lin_smooth(x) _ = lin_smooth_skip_scaling(x) lin_smooth.to_inference() lin_smooth_skip_scaling.debug_skip_scaling = True lin_smooth_skip_scaling.to_inference() # verify that with scaling turned off, numerics match quantized version y_smooth_fq_only = lin_smooth_skip_scaling(x) y_smooth_fq = lin_smooth(x) y_dynamic_q = lin_dynamic_q(x.cpu()).to(device) # print('y_ref', y_ref) # print('y_smooth_nocalib', y_smooth_nocalib) # print('y_smooth_fq', y_smooth_fq) # print('y_smooth_fq_only', y_smooth_fq_only) # print('y_dynamic_q', y_dynamic_q) sqnr_smooth_fq = compute_error(y_ref, y_smooth_fq) sqnr_dynamic_q = compute_error(y_ref, y_dynamic_q) sqnr_fq = compute_error(y_smooth_fq_only, y_dynamic_q) # print('sqnr_smooth', sqnr_smooth_fq, 'sqnr_dynamic', sqnr_dynamic_q, 'sqnr_fq', sqnr_fq) assert torch.allclose( y_ref, y_smooth_nocalib ), "y_ref not close to y_smooth_nocalib" # after https://github.com/pytorch-labs/ao_benchmarks/pull/32, # numerics do not match exactly between production c++ code # and this Python code # assert torch.allclose( # y_smooth_fq_only, y_dynamic_q, # atol=torch.max(y_smooth_fq_only).item()*0.01, # rtol=0.00001), \ # 
'y_smooth_fq_only not close to y_dynamic_q' self.assertTrue(sqnr_smooth_fq.item() >= 40.0) self.assertTrue(sqnr_dynamic_q.item() >= 40.0) self.assertTrue(sqnr_fq.item() >= 40.0) def test_smooth_linear_cpu(self): self._test_smooth_linear_impl((1, 5, 3), (3, 4), "cpu") def test_smooth_linear_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return self._test_smooth_linear_impl((1, 32, 32), (32, 16), "cuda") def test_smooth_linear_edge_cases(self): # so we can use the full range torch.backends.quantized.engine = "qnnpack" lin_fp32 = nn.Linear(3, 4) lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float( lin_fp32, alpha=0.25 ) # test different ranks x0 = torch.randn(4, 5, 3) x1 = torch.randn(1, 8, 5, 3) x2 = torch.randn(2, 3, 7, 5, 3) # calibrate _ = lin_smooth(x0) _ = lin_smooth(x1) _ = lin_smooth(x2) # inference lin_smooth.to_inference() _ = lin_smooth(x0) _ = lin_smooth(x1) _ = lin_smooth(x2) def test_swap(self): m = nn.Sequential( nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 4)), nn.Linear(4, 4), ) m_copy = copy.deepcopy(m) swap_linear_with_smooth_fq_linear(m_copy, skip_fqn_list=["0.2"]) # verify all linears are swapped assert isinstance(m_copy[0][0], SmoothFakeDynamicallyQuantizedLinear) assert isinstance(m_copy[0][1], nn.ReLU) # this one was skipped assert isinstance(m_copy[0][2], nn.Linear) assert isinstance(m_copy[1], SmoothFakeDynamicallyQuantizedLinear) # verify results do not change without smoothing x = torch.randn(4, 4) y_ref = m(x) y = m_copy(x) assert torch.allclose(y_ref, y) def test_weight_t_and_non_t_numerics_match(self): # verify that numerics match whether weight is stored # in transposed format (for cuBLAS) vs non-transposed format # (for torch.compile) if not torch.cuda.is_available(): print("no cuda, skip") return dtype = torch.half device = "cuda" lin_ref = nn.Linear(32, 16, dtype=dtype, device=device) lin_eager_t = copy.deepcopy(lin_ref) lin_opt_t = copy.deepcopy(lin_eager_t) lin_opt = copy.deepcopy(lin_eager_t) lin_eager_t = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_eager_t) lin_opt_t = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_opt_t) lin_opt = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_opt) lin_opt.store_w_int_repr_t = False x = torch.randn(32, 32, dtype=dtype, device=device) y_calib_eager_t = lin_eager_t(x) y_calib_opt_t = lin_opt_t(x) y_calib_opt = lin_opt(x) torch.testing.assert_close(y_calib_eager_t, y_calib_opt_t) torch.testing.assert_close(y_calib_eager_t, y_calib_opt) lin_eager_t.to_inference() lin_opt_t.to_inference() lin_opt.to_inference() torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt_t.W_int_repr) torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt.W_int_repr) lin_opt_t = torch.compile(lin_opt_t, mode="max-autotune") lin_opt = torch.compile(lin_opt, mode="max-autotune") y_ref = lin_ref(x) y_eager = lin_eager_t(x) y_opt_t = lin_opt_t(x) y_opt = lin_opt(x) if not torch.any(torch.isinf(y_ref)) and torch.any(torch.isinf(y_eager)): # eager mode torch._int_mm is sometimes buggy, when this happens # we can't really compare the compiled version against it properly print("eager mode torch._int_mm known bad, test is inconclusive") return sqnr_ref_eager = compute_error(y_ref, y_eager) sqnr_eager_opt_t = compute_error(y_eager, y_opt_t) sqnr_eager_opt = compute_error(y_eager, y_opt) # since torch.compile for a torch.half model can # change numerics significantly, we can only test for a high SQNR here # and not for closeness self.assertTrue(sqnr_eager_opt_t >= 45.0) 
self.assertTrue(sqnr_eager_opt >= 45.0) # y_opt_t and y_opt should be equivalent torch.testing.assert_close(y_opt_t, y_opt) def test_selective_torch_compile(self): m = nn.Sequential( nn.Linear(4, 4), nn.Sequential( nn.Linear(4, 4), nn.Linear(4, 4), ), nn.Linear(4, 4), ) x = torch.randn(4, 4) y_ref = m(x) _replace_with_custom_fn_if_matches_filter( m, lambda mod: torch.compile(mod), lambda mod, fqn: isinstance(mod, nn.Linear) and fqn != "1.0", ) self.assertTrue(isinstance(m[0], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[1][0], nn.Linear)) self.assertTrue(isinstance(m[1][1], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[2], torch._dynamo.eval_frame.OptimizedModule)) y = m(x) torch.testing.assert_close(y, y_ref) def test_debug_x_absmax(self): m = nn.Sequential(nn.Linear(3, 4)) x0 = torch.randn(4, 5, 3) y0 = m(x0) swap_linear_with_smooth_fq_linear(m) # no calibration, straight to inference, should not crash smooth_fq_linear_to_inference(m, debug_skip_calibration=True) y1 = m(x0) class PythonQuantPrimitivesUnitTest(unittest.TestCase): def _test_dynamic_quant_per_tensor_numerics_impl( self, qmin, qmax, int_dtype, qint_dtype, float_dtype, device, qscheme ): x = torch.randn(256, dtype=float_dtype, device=device) y_vals, y_scale, y_zero_point = dynamically_quantize_per_tensor( x, qmin, qmax, int_dtype, qscheme ) # reference # quantize_per_tensor_dynamic doesn't work for half, so we cast there and back x_for_ref = x.half().float() if float_dtype == torch.float16 else x # quantize_per_tensor_dynamic doesn't support qscheme, so we just do dynamic # quant manually with observers + static quant obs = MinMaxObserver( dtype=qint_dtype, qscheme=qscheme, quant_min=qmin, quant_max=qmax ).to(device) obs(x_for_ref) ref_scale, ref_zero_point = obs.calculate_qparams() y_ref = torch.quantize_per_tensor( x_for_ref, ref_scale, ref_zero_point, qint_dtype ) # y_ref = torch.quantize_per_tensor_dynamic(x_for_ref, qint_dtype, False) # print(y_ref) if float_dtype == torch.float: assert torch.equal(y_vals, y_ref.int_repr()) else: # numerics are not exactly aligned yet, off-by-one probably due # to rounding assert torch.max(torch.abs(y_vals - y_ref.int_repr())).item() <= 1 torch.testing.assert_close( y_scale, torch.tensor([y_ref.q_scale()], device=device, dtype=float_dtype) ) if y_zero_point is not None: assert torch.equal( y_zero_point, torch.tensor([y_ref.q_zero_point()], device=device) ) else: self.assertTrue(y_ref.q_zero_point() == 0) # dequantize and check again x_dq = dequantize_per_tensor(y_vals, y_scale, y_zero_point, float_dtype) y_ref_dq = y_ref.dequantize().to(float_dtype) if float_dtype == torch.float: torch.testing.assert_close(x_dq, y_ref_dq) else: sqnr = compute_error(x_dq, y_ref_dq) self.assertTrue(sqnr.item() > 45.0) def test_dynamic_quant_per_tensor_numerics_cpu(self): # verifies that dynamic quant per tensor in plain pytorch matches # numerics of production AO code # TODO(future): test this on cpu-half, need to first make # torch.aminmax support half on cpu test_cases = ( ( 0, 255, torch.uint8, torch.quint8, torch.float32, "cpu", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float32, "cpu", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float32, "cpu", torch.per_tensor_symmetric, ), ( -127, 127, torch.int8, torch.qint8, torch.float32, "cpu", torch.per_tensor_symmetric, ), ) for row in test_cases: self._test_dynamic_quant_per_tensor_numerics_impl(*row) def 
test_dynamic_quant_per_tensor_numerics_cuda(self): # verifies that dynamic quant per tensor in plain pytorch matches # numerics of production AO code if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( ( -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", torch.per_tensor_symmetric, ), ( -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", torch.per_tensor_symmetric, ), ( -127, 127, torch.int8, torch.qint8, torch.float32, "cuda", torch.per_tensor_symmetric, ), ( -127, 127, torch.int8, torch.qint8, torch.float16, "cuda", torch.per_tensor_symmetric, ), ) for row in test_cases: self._test_dynamic_quant_per_tensor_numerics_impl(*row) def _test_dynamic_quant_per_channel_numerics_impl( self, qmin, qmax, int_dtype, qint_dtype, float_dtype, device ): # verifies that dynamic quant per channel in plain pytorch matches # numerics of production AO code # TODO(future): test this on cpu-half, need to first make # torch.aminmax support half on cpu x = torch.randn(16, 32, device=device, dtype=float_dtype) y_vals, y_scale, y_zero_point = dynamically_quantize_per_channel( x, qmin, qmax, int_dtype ) min_val, max_val = torch.aminmax(x, dim=1) # reference weight_obs = torch.ao.quantization.MovingAveragePerChannelMinMaxObserver( dtype=qint_dtype, quant_min=qmin, quant_max=qmax, qscheme=torch.per_channel_symmetric, averaging_constant=1.0, # make it ignore previous iterations ) weight_obs(x) y_ref_scale, y_ref_zp = weight_obs.calculate_qparams() y_ref_scale = y_ref_scale.to(device) y_ref_zp = y_ref_zp.to(device) # quantize_per_channel doesn't work for half, so we cast there and back x_for_ref = x.half().float() if float_dtype == torch.float16 else x y_ref = torch.quantize_per_channel( x_for_ref, y_ref_scale, y_ref_zp, 0, qint_dtype ) torch.testing.assert_close( y_scale, y_ref.q_per_channel_scales().to(float_dtype) ) assert torch.equal(y_zero_point, y_ref.q_per_channel_zero_points()) # this test case has one element where the rounding is off by one # from Python-only code vs the c++ code, it's easy to repro with # various shapes. 
# Discussion here is relevant: https://github.com/pytorch/pytorch/issues/16498 # TODO(future): figure out what to do about this # assert torch.equal(int_vals, q_reference.int_repr()) assert torch.max(torch.abs(y_vals - y_ref.int_repr())) <= 1 # dequantize x_dq = dequantize_per_channel(y_vals, y_scale, y_zero_point) x_ref_dq = y_ref.dequantize() # off-by-one for scale is okay torch.testing.assert_close( x_dq, x_ref_dq, atol=torch.max(y_scale).item() * 1.01, rtol=0.0001 ) def test_dynamic_quant_per_channel_numerics_cpu(self): test_cases = ((-128, 127, torch.int8, torch.qint8, torch.float32, "cpu"),) for row in test_cases: self._test_dynamic_quant_per_channel_numerics_impl(*row) def test_dynamic_quant_per_channel_numerics_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( (-128, 127, torch.int8, torch.qint8, torch.float32, "cuda"), (-128, 127, torch.int8, torch.qint8, torch.float16, "cuda"), ) for row in test_cases: self._test_dynamic_quant_per_channel_numerics_impl(*row) def _test_quantize_per_token_impl(self, device, dtype): x = torch.randn(3, 3, 3, device=device, dtype=dtype) xq, scales = quantize_activation_per_token_absmax(x) x_dq = dequantize_per_tensor(xq, scales, None).to(x.dtype) sqnr = compute_error(x, x_dq) self.assertTrue(sqnr >= 45.0) def test_quantize_per_token_cpu(self): for dtype in (torch.float32, torch.float16, torch.bfloat16): self._test_quantize_per_token_impl("cpu", dtype) def test_quantize_per_token_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return for dtype in (torch.float32, torch.float16, torch.bfloat16): self._test_quantize_per_token_impl("cuda", dtype) def _test_per_token_linear_impl(self, device, dtype): x = torch.randn(2, 16, 8, device=device, dtype=dtype) w = torch.randn(16, 8, device=device, dtype=dtype) wq, w_scales, _w_zp = dynamically_quantize_per_channel(w, -127, 127, torch.int8) # Note: need to make the weight contiguous because we are # testing in eager mode and cuBlas will not give correct results # for a transposed weight y = quant_int8_dynamic_per_token_linear( x, wq.t().contiguous(), w_scales, None, dtype ) y_ref = torch.matmul(x, w.t()) sqnr = compute_error(y_ref, y) self.assertTrue(sqnr >= 42.0) def test_per_token_linear_cpu(self): for dtype in (torch.float32,): self._test_per_token_linear_impl("cpu", dtype) def test_per_token_linear_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return for dtype in (torch.float32, torch.float16, torch.bfloat16): self._test_per_token_linear_impl("cuda", dtype) def test__int_mm(self): # TODO(future): figure out what here needs to move to PT core, # if it's not already tested there if not torch.cuda.is_available(): print("no cuda, skip") return m, k, n = 32, 32, 16 x = torch.randint(-128, 127, (m, k), dtype=torch.int8, device="cuda") w = torch.randint(-128, 127, (k, n), dtype=torch.int8, device="cuda") y_ref = torch.matmul(x.float(), w.float()).to(torch.int32) y_raw = safe_int_mm(x, w) wrap_in_mm_opt = torch.compile(safe_int_mm, mode="max-autotune") # note: triton chokes on the line below on k == 8 and n == 8 with # https://www.internalfb.com/phabricator/paste/view/P683467944 # TODO(future): file an issue y_opt = wrap_in_mm_opt(x, w) torch.testing.assert_close(y_ref, y_raw, atol=0, rtol=0) torch.testing.assert_close(y_ref, y_opt, atol=0, rtol=0) def test__int_mm_eager_and_torch_compile_numerics(self): if not torch.cuda.is_available(): print("no cuda, skip") return def __int_mm_ref(x, w): x = x.cpu().to(torch.int32) w = 
w.cpu().to(torch.int32) y = torch.matmul(x, w) return y.cuda() shapes = ( # minimal test shape ((1, 32, 32), (32, 16)), # paste of real linear shapes from LLaMa 1.5b ((17, 1, 1536), (1536, 1536)), ((17, 8, 4096), (4096, 1536)), ((17, 1, 1536), (1536, 4096)), ((17, 8, 1536), (1536, 1536)), ((17, 1, 4096), (4096, 1536)), ((17, 8, 1536), (1536, 4096)), ) for x_shape, w_shape in shapes: def wrap_torch_int_mm(x, w): b, n, k = x.shape k, m = w.shape x = x.reshape(b * n, k) res = safe_int_mm(x, w) res = res.reshape(b, n, m) return res wrap_torch_int_mm_opt = torch.compile( wrap_torch_int_mm, mode="max-autotune" ) x = torch.randint(-128, 127, x_shape, dtype=torch.int8, device="cuda") w = torch.randint(-128, 127, w_shape, dtype=torch.int8, device="cuda") z_ref = __int_mm_ref(x, w) z_eager = wrap_torch_int_mm(x, w) z_torch_compile = wrap_torch_int_mm_opt(x, w) # print(z_ref) # print(z_eager) # print(z_torch_compile) torch.testing.assert_close(z_ref, z_eager, atol=0, rtol=0) torch.testing.assert_close(z_ref, z_torch_compile, atol=0, rtol=0) def _test_qlinear_per_channel_numerics( self, x_shape, lin_shape, qmin, qmax, int_dtype, qint_dtype, float_dtype, device ): qconfig = torch.ao.quantization.per_channel_dynamic_qconfig x = torch.randn(*x_shape, device=device, dtype=float_dtype) # TODO: test bias true and false # Note: reference path only works on float because lack of aten quant primitives # support of half, so we cast back and forth to emulate lin_ref = ( nn.Sequential(nn.Linear(*lin_shape)) .eval() .to(float_dtype) .float() .to(device) ) y_ref = lin_ref(x.float()) weight = lin_ref[0].weight bias = lin_ref[0].bias qconfig_mapping = QConfigMapping().set_global(qconfig) lin_ref_p = prepare_fx(lin_ref, qconfig_mapping, (torch.randn(1, 1),)) lin_ref_q = convert_to_reference_fx(lin_ref_p) y_q_ref = lin_ref_q(x.float()) # scale, zp of weight (get from reference model) w_obs = qconfig.weight() w_obs(weight) lin_ref_w_scale, lin_ref_w_zp = w_obs.calculate_qparams() lin_ref_w_scale = lin_ref_w_scale.to(device).to(float_dtype) # print('lin_ref_w', 'scale', lin_ref_w_scale, 'zp', lin_ref_w_zp) w_vals, _s, _z = dynamically_quantize_per_channel( getattr(lin_ref_q, "0").weight.to(float_dtype), -128, 127, torch.int8 ) w_vals = w_vals.t().contiguous() w_vals_sums = w_vals.sum(dim=0) # do our version of the quantized linear operator y = quant_int8_dynamic_linear( x, qmin, qmax, int_dtype, w_vals, lin_ref_w_scale, w_vals_sums, bias, float_dtype, ) # print('y', y) # print('y_q_ref', y_q_ref) # print('y_ref', y_ref) sqnr_ref = compute_error(y_ref, y_q_ref) sqnr_our = compute_error(y_ref, y) # print('sqnr_ref', sqnr_ref, 'sqnr_our', sqnr_our) # for large shapes, sqnr can be in the high 30s for float32 and float16 self.assertTrue(sqnr_our.item() >= 37.5) def test_qlinear_per_channel_numerics_cpu(self): # Note: the AO codebase doesn't easily support qint8 activations, # so the test cases below are for the quant primitives defined in # this file only. The AO reference is using quint8 here. 
test_cases = ( ((2, 3), (3, 4), 0, 255, torch.uint8, torch.quint8, torch.float32, "cpu"), ((2, 3), (3, 4), -128, 127, torch.int8, torch.qint8, torch.float32, "cpu"), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) def test_qlinear_per_channel_numerics_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( # Note: torch._int_mm needs int8 activations, so we don't test uint8 # activations on CUDA at all ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), # a large shape from LLaMa 1.5B - currently fails for float16 ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) class TestSubclass(unittest.TestCase): def _test_dequantize_impl( self, test_subclass_from_float, min_sqnr=35, test_dtype=torch.bfloat16, test_shape=(32, 64, 64), ): m, k, n = test_shape lin = torch.nn.Linear(k, n, device="cuda").to(test_dtype) w = lin.weight.detach() lin.weight = torch.nn.Parameter( test_subclass_from_float(lin.weight), requires_grad=False ) self.assertGreater(
SQNR(w, lin.weight.dequantize()),
21
2023-11-03 21:27:36+00:00
16k
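For reference, the scale helper exercised by test_figure_4 in the row above follows the usual SmoothQuant recipe: per input channel j, s_j = max|X_j|**alpha / max|W_j|**(1 - alpha), so dividing X by s and folding s into W leaves the matmul unchanged. A minimal self-contained sketch of that check (this get_scale is an assumed stand-in for the helper the test imports, not necessarily the library's exact implementation):

import torch

def get_scale(x_absmax, w_absmax, alpha=0.5):
    # assumed SmoothQuant-style per-channel scale: s_j = max|X_j|^alpha / max|W_j|^(1 - alpha)
    return x_absmax.pow(alpha) / w_absmax.pow(1.0 - alpha)

X = torch.FloatTensor([1, -16, 2, 6, -2, 8, -1, -9]).reshape(1, 2, 4)
W = torch.FloatTensor([2, 1, -2, 1, -1, -1, 2, -1, -2, -1, -1, 1]).reshape(4, 3)
s = get_scale(torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1), alpha=0.5)

# scaling X down and W up by the same per-channel factors leaves the product unchanged
Y = torch.matmul(X, W)
Y_scaled = torch.matmul(X / s.reshape(1, 1, -1), torch.matmul(torch.diag(s), W))
assert torch.allclose(Y, Y_scaled, atol=1e-5)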
Zaczero/openstreetmap-ng
src/controllers/api/0.6/changeset_comment.py
[ { "identifier": "api_user", "path": "src/lib/auth.py", "snippet": "def api_user(*require_scopes: Scope | ExtendedScope) -> User:\n \"\"\"\n Dependency for authenticating the api user.\n \"\"\"\n\n return Security(\n _get_user,\n scopes=tuple(s.value for s in require_scopes),\n )" }, { "identifier": "Format06", "path": "src/lib/format/format06.py", "snippet": "class Format06:\n @staticmethod\n def _encode_tags(tags: dict) -> Sequence[dict] | dict:\n if format_is_json():\n return tags\n else:\n return tuple({'@k': k, '@v': v} for k, v in tags.items())\n\n @staticmethod\n def _decode_tags_unsafe(tags: Sequence[dict]) -> dict:\n \"\"\"\n This method does not validate the input data.\n\n >>> _decode_tags_unsafe([\n ... {'@k': 'a', '@v': '1'},\n ... {'@k': 'b', '@v': '2'},\n ... ])\n {'a': '1', 'b': '2'}\n \"\"\"\n\n items = tuple((tag['@k'], tag['@v']) for tag in tags)\n result = dict(items)\n\n if len(items) != len(result):\n raise ValueError('Duplicate tags keys')\n\n return result\n\n @staticmethod\n def decode_tags_and_validate(tags: Sequence[dict]) -> dict:\n \"\"\"\n >>> decode_tags_and_validate([\n ... {'@k': 'a', '@v': '1'},\n ... {'@k': 'b', '@v': '2'},\n ... ])\n {'a': '1', 'b': '2'}\n \"\"\"\n\n return TagsValidating(tags=Format06._decode_tags_unsafe(tags)).tags\n\n @staticmethod\n def _encode_point(point: Point | None) -> dict:\n \"\"\"\n >>> _encode_point(Point(1, 2))\n {'@lon': 1, '@lat': 2}\n \"\"\"\n\n if not point:\n return {}\n\n return {\n XAttr('lon'): point.x,\n XAttr('lat'): point.y,\n }\n\n @staticmethod\n def _decode_point_unsafe(data: dict) -> Point | None:\n \"\"\"\n This method does not validate the input data.\n\n >>> _decode_point_unsafe({'@lon': '1', '@lat': '2'})\n POINT (1 2)\n \"\"\"\n\n if (lon := data.get('@lon')) is None or (lat := data.get('@lat')) is None:\n return None\n\n return Point(\n float(lon),\n float(lat),\n )\n\n @staticmethod\n def _encode_nodes(nodes: Sequence[ElementMemberRef]) -> Sequence[dict] | Sequence[int]:\n \"\"\"\n >>> _encode_nodes([\n ... ElementMember(type=ElementType.node, typed_id=1, role=''),\n ... ElementMember(type=ElementType.node, typed_id=2, role=''),\n ... ])\n ({'@ref': 1}, {'@ref': 2})\n \"\"\"\n\n if format_is_json():\n return tuple(node.typed_id for node in nodes)\n else:\n return tuple({'@ref': node.typed_id} for node in nodes)\n\n @staticmethod\n def _decode_nodes_unsafe(nodes: Sequence[dict]) -> Sequence[ElementMemberRef]:\n \"\"\"\n This method does not validate the input data.\n\n >>> _decode_nodes_unsafe([{'@ref': '1'}])\n [ElementMember(type=ElementType.node, typed_id=1, role='')]\n \"\"\"\n\n return tuple(\n ElementMemberRef(\n type=ElementType.node,\n typed_id=int(node['@ref']),\n role='',\n )\n for node in nodes\n )\n\n @staticmethod\n def _encode_members(members: Sequence[ElementMemberRef]) -> Sequence[dict]:\n \"\"\"\n >>> _encode_members([\n ... ElementMember(type=ElementType.node, typed_id=1, role='a'),\n ... ElementMember(type=ElementType.way, typed_id=2, role='b'),\n ... ])\n (\n {'@type': 'node', '@ref': 1, '@role': 'a'},\n {'@type': 'way', '@ref': 2, '@role': 'b'},\n )\n \"\"\"\n\n return tuple(\n {\n XAttr('type'): member.type.value,\n XAttr('ref'): member.typed_id,\n XAttr('role'): member.role,\n }\n for member in members\n )\n\n @staticmethod\n def _decode_members_unsafe(members: Sequence[dict]) -> Sequence[ElementMemberRef]:\n \"\"\"\n This method does not validate the input data.\n\n >>> _decode_members_unsafe([\n ... {'@type': 'node', '@ref': '1', '@role': 'a'},\n ... 
])\n [ElementMember(type=ElementType.node, typed_id=1, role='a')]\n \"\"\"\n\n return tuple(\n ElementMemberRef(\n type=ElementType.from_str(member['@type']),\n typed_id=int(member['@ref']),\n role=member['@role'],\n )\n for member in members\n )\n\n @staticmethod\n def encode_element(element: Element) -> dict:\n \"\"\"\n >>> encode_element(Element(type=ElementType.node, typed_id=1, version=1, ...))\n {'node': {'@id': 1, '@version': 1, ...}}\n \"\"\"\n\n if format_is_json():\n return {\n 'type': element.type.value,\n 'id': element.typed_id,\n **(Format06._encode_point(element.point) if element.type == ElementType.node else {}),\n 'version': element.version,\n 'timestamp': element.created_at,\n 'changeset': element.changeset_id,\n 'uid': element.user_id,\n 'user': element.user.display_name,\n 'visible': element.visible,\n 'tags': element.tags,\n **({'nodes': Format06._encode_nodes(element.members)} if element.type == ElementType.way else {}),\n **(\n {'members': Format06._encode_members(element.members)}\n if element.type == ElementType.relation\n else {}\n ),\n }\n else:\n return {\n element.type.value: {\n '@id': element.typed_id,\n **(Format06._encode_point(element.point) if element.type == ElementType.node else {}),\n '@version': element.version,\n '@timestamp': element.created_at,\n '@changeset': element.changeset_id,\n '@uid': element.user_id,\n '@user': element.user.display_name,\n '@visible': element.visible,\n 'tag': Format06._encode_tags(element.tags),\n **({'nd': Format06._encode_nodes(element.members)} if element.type == ElementType.way else {}),\n **(\n {'member': Format06._encode_members(element.members)}\n if element.type == ElementType.relation\n else {}\n ),\n }\n }\n\n @staticmethod\n def decode_element(element: dict, changeset_id: int | None) -> Element:\n \"\"\"\n If `changeset_id` is `None`, it will be extracted from the element data.\n \"\"\"\n\n if len(element) != 1:\n raise ValueError(f'Expected one root element, got {len(element)}')\n\n type, data = next(iter(element.items()))\n type = ElementType.from_str(type)\n data: dict\n\n # decode members from either nd or member\n if data_nodes := data.get('nd'):\n members = Format06._decode_nodes_unsafe(data_nodes)\n elif data_members := data.get('member'):\n members = Format06._decode_members_unsafe(data_members)\n else:\n members = ()\n\n return Element(\n **ElementValidating(\n user_id=auth_user().id,\n changeset_id=changeset_id or data.get('@changeset'),\n type=type,\n typed_id=data.get('@id'),\n version=data.get('@version', 0) + 1,\n visible=data.get('@visible', True),\n tags=Format06._decode_tags_unsafe(data.get('tag', ())),\n point=Format06._decode_point_unsafe(data),\n members=members,\n ).to_orm_dict()\n )\n\n @staticmethod\n def encode_elements(elements: Sequence[Element]) -> dict[str, Sequence[dict]]:\n \"\"\"\n >>> encode_elements([\n ... Element(type=ElementType.node, typed_id=1, version=1, ...),\n ... Element(type=ElementType.way, typed_id=2, version=1,\n ... 
])\n {'node': [{'@id': 1, '@version': 1, ...}], 'way': [{'@id': 2, '@version': 1, ...}]}\n \"\"\"\n\n if format_is_json():\n return {'elements': tuple(Format06.encode_element(element) for element in elements)}\n else:\n result: dict[str, list[dict]] = defaultdict(list)\n for element in elements:\n result[element.type.value].append(Format06.encode_element(element))\n return result\n\n @staticmethod\n def _encode_changeset_comment(comment: ChangesetComment) -> dict:\n \"\"\"\n >>> _encode_changeset_comment(ChangesetComment(...))\n {'@uid': 1, '@user': ..., '@date': ..., 'text': 'lorem ipsum'}\n \"\"\"\n\n return {\n XAttr('id'): comment.id,\n XAttr('date'): comment.created_at,\n XAttr('uid'): comment.user_id,\n XAttr('user'): comment.user.display_name,\n 'text': comment.body,\n }\n\n @staticmethod\n def encode_changeset(changeset: Changeset, *, add_comments_count: int = 0) -> dict:\n \"\"\"\n >>> encode_changeset(Changeset(...))\n {'changeset': {'@id': 1, '@created_at': ..., ..., 'discussion': {'comment': [...]}}}\n \"\"\"\n\n if changeset.boundary:\n minx, miny, maxx, maxy = changeset.boundary.bounds\n boundary_d = {\n XAttr('minlon', custom_xml='min_lon'): minx,\n XAttr('minlat', custom_xml='min_lat'): miny,\n XAttr('maxlon', custom_xml='max_lon'): maxx,\n XAttr('maxlat', custom_xml='max_lat'): maxy,\n }\n else:\n boundary_d = {}\n\n try:\n _ = changeset.comments\n has_comments = True\n except InvalidRequestError:\n has_comments = False\n\n if format_is_json():\n return {\n 'type': 'changeset',\n 'id': changeset.id,\n 'created_at': changeset.created_at,\n **({'closed_at': changeset.closed_at} if changeset.closed_at else {}),\n 'open': not changeset.closed_at,\n 'uid': changeset.user_id,\n 'user': changeset.user.display_name,\n **boundary_d,\n 'comments_count': len(changeset.comments) + add_comments_count,\n 'changes_count': changeset.size,\n 'tags': changeset.tags,\n **(\n {'discussion': tuple(Format06._encode_changeset_comment(comment) for comment in changeset.comments)}\n if has_comments\n else {}\n ),\n }\n else:\n return {\n 'changeset': {\n '@id': changeset.id,\n '@created_at': changeset.created_at,\n **({'@closed_at': changeset.closed_at} if changeset.closed_at else {}),\n '@open': not changeset.closed_at,\n '@uid': changeset.user_id,\n '@user': changeset.user.display_name,\n **boundary_d,\n '@comments_count': len(changeset.comments) + add_comments_count,\n '@changes_count': changeset.size,\n 'tag': Format06._encode_tags(changeset.tags),\n **(\n {\n 'discussion': {\n 'comment': tuple(\n Format06._encode_changeset_comment(comment) for comment in changeset.comments\n ),\n }\n }\n if has_comments\n else {}\n ),\n }\n }\n\n @staticmethod\n def encode_changesets(changesets: Sequence[Changeset]) -> dict:\n \"\"\"\n >>> encode_changesets([\n ... Changeset(...),\n ... Changeset(...),\n ... ])\n {'changeset': [{'@id': 1, '@created_at': ..., ..., 'discussion': {'comment': [...]}}]}\n \"\"\"\n\n if format_is_json():\n return {'elements': tuple(Format06.encode_changeset(changeset) for changeset in changesets)}\n else:\n return {'changeset': tuple(Format06.encode_changeset(changeset)['changeset'] for changeset in changesets)}\n\n @staticmethod\n def encode_osmchange(elements: Sequence[Element]) -> Sequence[tuple[str, dict]]:\n \"\"\"\n >>> encode_osmchange([\n ... Element(type=ElementType.node, typed_id=1, version=1, ...),\n ... Element(type=ElementType.way, typed_id=2, version=2, ...)\n ... 
])\n [\n ('create', {'node': [{'@id': 1, '@version': 1, ...}]}),\n ('modify', {'way': [{'@id': 2, '@version': 2, ...}]}),\n ]\n \"\"\"\n\n result = [None] * len(elements)\n for i, element in len(elements):\n if element.version == 1:\n action = OSMChangeAction.create.value\n elif element.visible:\n action = OSMChangeAction.modify.value\n else:\n action = OSMChangeAction.delete.value\n result[i] = (action, Format06.encode_element(element))\n return result\n\n @staticmethod\n def decode_osmchange(elements: Sequence[tuple[str, dict]], changeset_id: int | None) -> Sequence[Element]:\n \"\"\"\n If `changeset_id` is `None`, it will be extracted from the element data.\n\n >>> decode_osmchange([\n ... ('create', {'node': [{'@id': 1, '@version': 1, ...}]}),\n ... ('modify', {'way': [{'@id': 2, '@version': 2, ...}]}),\n ... ])\n [Element(type=ElementType, ...), Element(type=ElementType.way, ...)]\n \"\"\"\n\n result = [None] * len(elements)\n\n for i, (action, element_d) in enumerate(elements):\n if len(element_d) != 1:\n raise ValueError(f'Expected one element in {action!r}, got {len(element_d)}')\n\n element = Format06.decode_element(element_d, changeset_id)\n\n if action == OSMChangeAction.create.value:\n if element.id > 0:\n raise_for().diff_create_bad_id(element.versioned_ref)\n if element.version > 1:\n element.version = 1\n elif action == OSMChangeAction.modify.value:\n if element.version < 2:\n raise_for().diff_update_bad_version(element.versioned_ref)\n elif action == OSMChangeAction.delete.value:\n if element.version < 2:\n raise_for().diff_update_bad_version(element.versioned_ref)\n if element.visible:\n element.visible = False\n else:\n raise_for().diff_unsupported_action(action)\n\n result[i] = element\n\n return result\n\n @staticmethod\n def encode_diff_result(assigned_ref_map: dict[TypedElementRef, Sequence[Element]]) -> Sequence[tuple]:\n \"\"\"\n >>> encode_diff_result({\n ... TypedElementRef(type=ElementType.node, typed_id=-1): [\n ... Element(type=ElementType.node, typed_id=1, version=1, ...),\n ... Element(type=ElementType.node, typed_id=1, version=2, ...),\n ... ],\n ... })\n (\n ('node', {'@old_id': -1, '@new_id': 1, '@new_version': 1}),\n ('node', {'@old_id': -1, '@new_id': 1, '@new_version': 2})\n )\n \"\"\"\n\n return tuple(\n (\n typed_ref.type.value,\n {\n '@old_id': typed_ref.typed_id,\n '@new_id': element.typed_id,\n '@new_version': element.version,\n },\n )\n for typed_ref, elements in assigned_ref_map.items()\n for element in elements\n )\n\n @staticmethod\n def encode_tracks(trace_points: Sequence[TracePoint]) -> dict:\n \"\"\"\n >>> encode_tracks([\n ... TracePoint(...),\n ... TracePoint(...),\n ... 
])\n {'trk': [{'trkseg': [{'trkpt': [{'@lon': 1, '@lat': 2}, {'@lon': 3, '@lat': 4}]}]}]}\n \"\"\"\n\n trks = []\n trk_trksegs = []\n trk_trkseg_trkpts = []\n\n last_trk_id = None\n last_trkseg_id = None\n\n for tp in trace_points:\n trace = tp.trace\n\n # if trace is available via api, encode full information\n if trace.timestamps_via_api:\n # handle track change\n if last_trk_id != trace.id:\n if trace.visibility == TraceVisibility.identifiable:\n url = f'/user/permalink/{trace.user_id}/traces/{trace.id}'\n else:\n url = None\n\n trk_trksegs = []\n trks.append(\n {\n 'name': trace.name,\n 'desc': trace.description,\n **({'url': url} if url else {}),\n 'trkseg': trk_trksegs,\n }\n )\n last_trk_id = trace.id\n last_trkseg_id = None\n\n # handle track segment change\n if last_trkseg_id != tp.track_idx:\n trk_trkseg_trkpts = []\n trk_trksegs.append({'trkpt': trk_trkseg_trkpts})\n last_trkseg_id = tp.track_idx\n\n # add point\n trk_trkseg_trkpts.append(\n {\n **Format06._encode_point(tp.point),\n **({'ele': tp.elevation} if tp.elevation is not None else {}),\n 'time': tp.captured_at,\n }\n )\n\n # otherwise, encode only coordinates\n else:\n # handle track and track segment change\n if last_trk_id is not None or last_trkseg_id is not None:\n trk_trksegs = []\n trks.append({'trkseg': trk_trksegs})\n trk_trkseg_trkpts = []\n trk_trksegs.append({'trkpt': trk_trkseg_trkpts})\n last_trk_id = None\n last_trkseg_id = None\n\n trk_trkseg_trkpts.append(Format06._encode_point(tp.point))\n\n return {'trk': trks}\n\n @staticmethod\n def decode_tracks(tracks: Sequence[dict], *, track_idx_start: int = 0) -> Sequence[TracePoint]:\n \"\"\"\n >>> decode_tracks([{'trkseg': [{'trkpt': [{'@lon': 1, '@lat': 2}]}]}])\n [TracePoint(...)]\n \"\"\"\n\n result = []\n\n for trk in tracks:\n trk: dict\n for track_idx, trkseg in enumerate(trk.get('trkseg', []), track_idx_start):\n trkseg: dict\n for trkpt in trkseg.get('trkpt', []):\n trkpt: dict\n\n result.append(\n TracePoint(\n **TracePointValidating(\n track_idx=track_idx,\n captured_at=datetime.fromisoformat(time) if (time := trkpt.get('time')) else None,\n point=Format06._decode_point_unsafe(trkpt),\n elevation=trkpt.get('ele'),\n ).to_orm_dict()\n )\n )\n\n return result\n\n @staticmethod\n def encode_gpx_file(trace: Trace) -> dict:\n \"\"\"\n >>> encode_gpx_file(Trace(...))\n {'gpx_file': {'@id': 1, '@uid': 1234, ...}}\n \"\"\"\n\n return {\n 'gpx_file': {\n '@id': trace.id,\n '@uid': trace.user_id,\n '@user': trace.user.display_name,\n '@timestamp': trace.created_at,\n '@name': trace.name,\n '@lon': trace.start_point.x,\n '@lat': trace.start_point.y,\n '@visibility': trace.visibility.value,\n '@pending': False,\n 'description': trace.description,\n 'tag': trace.tags,\n }\n }\n\n @staticmethod\n def encode_gpx_files(traces: Sequence[Trace]) -> dict:\n \"\"\"\n >>> encode_gpx_files([\n ... Trace(...),\n ... Trace(...),\n ... 
])\n {'gpx_file': [{'@id': 1, '@uid': 1234, ...}, {'@id': 2, '@uid': 1234, ...}]}\n \"\"\"\n\n return {\n 'gpx_file': tuple(Format06.encode_gpx_file(trace) for trace in traces),\n }\n\n @staticmethod\n def decode_gpx_file(gpx_file: dict) -> Trace:\n return Trace(\n **TraceValidating(\n user_id=auth_user().id,\n name=gpx_file.get('@name'),\n description=gpx_file.get('description'),\n visibility=TraceVisibility(gpx_file.get('@visibility')),\n size=1,\n start_point=Point(0, 0),\n tags=gpx_file.get('tag', ()),\n ).to_orm_dict()\n )\n\n @staticmethod\n def _encode_note_comment(comment: NoteComment) -> dict:\n \"\"\"\n >>> _encode_note_comment(NoteComment(...))\n {'date': '2019-06-15 08:26:04 UTC', 'uid': 1234, 'user': 'userName', ...}\n \"\"\"\n\n return {\n 'date': format_sql_date(comment.created_at),\n 'uid': comment.user_id,\n 'user': comment.user.display_name,\n 'user_url': comment.user.permalink,\n 'action': comment.event.value,\n 'text': comment.body,\n 'html': comment.body_rich.value, # a disaster waiting to happen\n }\n\n @staticmethod\n def encode_note(note: Note) -> dict:\n \"\"\"\n >>> encode_note(Note(...))\n {'note': {'@lon': 0.1, '@lat': 51, 'id': 16659, ...}}\n \"\"\"\n\n style = format_style()\n\n if style == FormatStyle.json:\n return {\n 'type': 'Feature',\n 'geometry': mapping(note.point),\n 'properties': {\n 'id': note.id,\n 'url': f'{API_URL}/api/0.6/notes/{note.id}.json',\n **(\n {\n 'reopen_url': f'{API_URL}/api/0.6/notes/{note.id}/reopen.json',\n }\n if note.closed_at\n else {\n 'comment_url': f'{API_URL}/api/0.6/notes/{note.id}/comment.json',\n 'close_url': f'{API_URL}/api/0.6/notes/{note.id}/close.json',\n }\n ),\n 'date_created': format_sql_date(note.created_at),\n **({'closed_at': format_sql_date(note.closed_at)} if note.closed_at else {}),\n 'status': note.status.value,\n 'comments': tuple(Format06._encode_note_comment(comment) for comment in note.comments),\n },\n }\n elif style == FormatStyle.gpx:\n return {\n 'wpt': {\n **Format06._encode_point(note.point),\n 'time': note.created_at,\n 'name': f'Note: {note.id}',\n 'link': {'href': note.permalink},\n 'desc': ET.CDATA(render('api/0.6/note_comments_rss.jinja2', comments=note.comments)),\n 'extensions': {\n 'id': note.id,\n 'url': f'{API_URL}/api/0.6/notes/{note.id}.gpx',\n **(\n {\n 'reopen_url': f'{API_URL}/api/0.6/notes/{note.id}/reopen.gpx',\n }\n if note.closed_at\n else {\n 'comment_url': f'{API_URL}/api/0.6/notes/{note.id}/comment.gpx',\n 'close_url': f'{API_URL}/api/0.6/notes/{note.id}/close.gpx',\n }\n ),\n 'date_created': format_sql_date(note.created_at),\n **({'date_closed': format_sql_date(note.closed_at)} if note.closed_at else {}),\n 'status': note.status.value,\n },\n }\n }\n else:\n return {\n 'note': {\n **Format06._encode_point(note.point),\n 'id': note.id,\n 'url': f'{API_URL}/api/0.6/notes/{note.id}',\n **(\n {\n 'reopen_url': f'{API_URL}/api/0.6/notes/{note.id}/reopen',\n }\n if note.closed_at\n else {\n 'comment_url': f'{API_URL}/api/0.6/notes/{note.id}/comment',\n 'close_url': f'{API_URL}/api/0.6/notes/{note.id}/close',\n }\n ),\n 'date_created': format_sql_date(note.created_at),\n **({'date_closed': format_sql_date(note.closed_at)} if note.closed_at else {}),\n 'status': note.status.value,\n 'comments': {\n 'comment': tuple(Format06._encode_note_comment(comment) for comment in note.comments),\n },\n }\n }\n\n @staticmethod\n def encode_notes(notes: Sequence[Note]) -> dict:\n \"\"\"\n >>> encode_notes([\n ... Note(...),\n ... Note(...),\n ... 
])\n {'note': [{'@lon': 1, '@lat': 2, 'id': 1, ...}]}\n \"\"\"\n\n style = format_style()\n\n if style == FormatStyle.json:\n return {'type': 'FeatureCollection', 'features': tuple(Format06.encode_note(note) for note in notes)}\n elif style == FormatStyle.gpx:\n return {'wpt': tuple(Format06.encode_note(note)['wpt'] for note in notes)}\n else:\n return {'note': tuple(Format06.encode_note(note)['note'] for note in notes)}\n\n @staticmethod\n def _encode_languages(languages: Sequence[str]) -> dict | Sequence[str]:\n \"\"\"\n >>> _encode_languages(['en', 'pl'])\n {'lang': ('en', 'pl')}\n \"\"\"\n\n if format_is_json():\n return tuple(languages)\n else:\n return {'lang': tuple(languages)}\n\n @staticmethod\n async def encode_user(user: User) -> dict:\n \"\"\"\n >>> encode_user(User(...))\n {'user': {'@id': 1234, '@display_name': 'userName', ...}}\n \"\"\"\n\n current_user = auth_user()\n access_private = current_user and current_user.id == user.id\n\n changesets_count = 0\n traces_count = 0\n block_received_count = 0\n block_received_active_count = 0\n block_issued_count = 0\n block_issued_active_count = 0\n messages_received_count = 0\n messages_received_unread_count = 0\n messages_sent_count = 0\n\n async def changesets_count_task() -> None:\n nonlocal changesets_count\n changesets_count = await ChangesetRepository.count_by_user_id(user.id)\n\n async def traces_count_task() -> None:\n nonlocal traces_count\n traces_count = await TraceRepository.count_by_user_id(user.id)\n\n async def block_received_count_task() -> None:\n nonlocal block_received_count, block_received_active_count\n total, active = await UserBlockRepository.count_received_by_user_id(user.id)\n block_received_count = total\n block_received_active_count = active\n\n async def block_issued_count_task() -> None:\n nonlocal block_issued_count, block_issued_active_count\n total, active = await UserBlockRepository.count_given_by_user_id(user.id)\n block_issued_count = total\n block_issued_active_count = active\n\n async def messages_received_count_task() -> None:\n nonlocal messages_received_count, messages_received_unread_count\n total, unread = await MessageRepository.count_received_by_user_id(user.id)\n messages_received_count = total\n messages_received_unread_count = unread\n\n async def messages_sent_count_task() -> None:\n nonlocal messages_sent_count\n messages_sent_count = await MessageRepository.count_sent_by_user_id(user.id)\n\n async with anyio.create_task_group() as tg:\n tg.start_soon(changesets_count_task)\n tg.start_soon(traces_count_task)\n tg.start_soon(block_received_count_task)\n tg.start_soon(block_issued_count_task)\n\n if access_private:\n tg.start_soon(messages_received_count_task)\n tg.start_soon(messages_sent_count_task)\n\n return {\n 'user': {\n XAttr('id'): user.id,\n XAttr('display_name'): user.display_name,\n XAttr('account_created'): user.created_at,\n 'description': user.description,\n ('contributor_terms' if format_is_json() else 'contributor-terms'): {\n XAttr('agreed'): True,\n **({XAttr('pd'): user.consider_public_domain} if access_private else {}),\n },\n 'img': {XAttr('href'): user.avatar_url},\n 'roles': [role.value for role in user.roles],\n 'changesets': {XAttr('count'): changesets_count},\n 'traces': {XAttr('count'): traces_count},\n 'blocks': {\n 'received': {\n XAttr('count'): block_received_count,\n XAttr('active'): block_received_active_count,\n },\n 'issued': {\n XAttr('count'): block_issued_count,\n XAttr('active'): block_issued_active_count,\n },\n },\n # private section\n **(\n {\n 
**(\n {\n 'home': {\n **Format06._encode_point(user.home_point),\n XAttr('zoom'): user.home_zoom,\n }\n }\n if user.home_point\n else {}\n ),\n 'languages': Format06._encode_languages(user.languages),\n 'messages': {\n 'received': {\n XAttr('count'): messages_received_count,\n XAttr('unread'): messages_received_unread_count,\n },\n 'sent': {XAttr('count'): messages_sent_count},\n },\n }\n if access_private\n else {}\n ),\n }\n }\n\n @staticmethod\n async def encode_users(users: Sequence[User]) -> dict:\n \"\"\"\n >>> encode_users([\n ... User(...),\n ... User(...),\n ... ])\n {'user': [{'@id': 1234, '@display_name': 'userName', ...}]}\n \"\"\"\n\n encoded = [None] * len(users)\n\n async def task(i: int, user: User):\n encoded[i] = await Format06.encode_user(user)\n\n async with anyio.create_task_group() as tg:\n for i, user in enumerate(users):\n tg.start_soon(task, i, user)\n\n if format_is_json():\n return {'users': tuple(user for user in encoded)}\n else:\n return {'user': tuple(user['user'] for user in encoded)}\n\n @staticmethod\n def decode_user_preference(pref: dict) -> UserPref:\n \"\"\"\n >>> decode_user_preference({'@k': 'key', '@v': 'value'})\n UserPref(key='key', value='value')\n \"\"\"\n\n return UserPref(\n **UserPrefValidating(\n user_id=auth_user().id,\n app_id=None, # 0.6 api does not support prefs partitioning\n key=pref['@k'],\n value=pref['@v'],\n ).to_orm_dict()\n )\n\n @staticmethod\n def decode_user_preferences(prefs: Sequence[dict]) -> Sequence[UserPref]:\n \"\"\"\n >>> decode_user_preferences([{'@k': 'key', '@v': 'value'}])\n [UserPref(key='key', value='value')]\n \"\"\"\n\n seen_keys = set()\n\n for pref in prefs:\n key = pref['@k']\n if key in seen_keys:\n raise_for().pref_duplicate_key(key)\n seen_keys.add(key)\n\n return tuple(Format06.decode_user_preference(pref) for pref in prefs)\n\n @staticmethod\n def encode_user_preferences(prefs: Sequence[UserPref]) -> dict:\n \"\"\"\n >>> encode_user_preferences([\n ... UserPref(key='key1', value='value1'),\n ... UserPref(key='key2', value='value2'),\n ... 
])\n {'preferences': {'preference': [{'@k': 'key1', '@v': 'value1'}, {'@k': 'key2', '@v': 'value2'}]}}\n \"\"\"\n\n if format_is_json():\n return {\n 'preferences': {pref.key: pref.value for pref in prefs},\n }\n else:\n return {\n 'preferences': {\n 'preference': tuple(\n {\n '@k': pref.key,\n '@v': pref.value,\n }\n for pref in prefs\n )\n }\n }" }, { "identifier": "CHANGESET_COMMENT_BODY_MAX_LENGTH", "path": "src/limits.py", "snippet": "CHANGESET_COMMENT_BODY_MAX_LENGTH = 5_000 # NOTE: value TBD" }, { "identifier": "User", "path": "src/models/db/user.py", "snippet": "class User(Base.Sequential, CreatedAtMixin, RichTextMixin):\n __tablename__ = 'user'\n __rich_text_fields__ = (('description', TextFormat.markdown),)\n\n email: Mapped[str] = mapped_column(Unicode(EMAIL_MAX_LENGTH), nullable=False)\n display_name: Mapped[str] = mapped_column(Unicode, nullable=False)\n password_hashed: Mapped[str] = mapped_column(Unicode, nullable=False)\n created_ip: Mapped[IPv4Address | IPv6Address] = mapped_column(INET, nullable=False)\n\n status: Mapped[UserStatus] = mapped_column(Enum(UserStatus), nullable=False)\n\n auth_provider: Mapped[AuthProvider | None] = mapped_column(Enum(AuthProvider), nullable=True)\n auth_uid: Mapped[str | None] = mapped_column(Unicode, nullable=True)\n\n languages: Mapped[list[str]] = mapped_column(ARRAY(Unicode(LANGUAGE_CODE_MAX_LENGTH)), nullable=False)\n\n # defaults\n password_changed_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=func.now())\n password_salt: Mapped[str | None] = mapped_column(Unicode, nullable=True, default=None)\n consider_public_domain: Mapped[bool] = mapped_column(Boolean, nullable=False)\n roles: Mapped[list[UserRole]] = mapped_column(ARRAY(Enum(UserRole)), nullable=False, default=())\n description: Mapped[str] = mapped_column(UnicodeText, nullable=False, default='')\n description_rich_hash: Mapped[bytes | None] = mapped_column(LargeBinary(HASH_SIZE), nullable=True, default=None)\n description_rich: Mapped[CacheEntry | None] = relationship(\n CacheEntry,\n primaryjoin=CacheEntry.id == description_rich_hash,\n viewonly=True,\n default=None,\n lazy='raise',\n )\n editor: Mapped[Editor | None] = mapped_column(Enum(Editor), nullable=True, default=None)\n avatar_type: Mapped[AvatarType] = mapped_column(Enum(AvatarType), nullable=False, default=AvatarType.default)\n avatar_id: Mapped[str | None] = mapped_column(Unicode(STORAGE_KEY_MAX_LENGTH), nullable=True, default=None)\n home_point: Mapped[Point | None] = mapped_column(PointType, nullable=True, default=None)\n home_zoom: Mapped[int | None] = mapped_column(SmallInteger, nullable=True, default=None)\n\n # relationships (avoid circular imports)\n if TYPE_CHECKING:\n from src.models.db.oauth1_application import OAuth1Application\n from src.models.db.oauth2_application import OAuth2Application\n from src.models.db.user_block import UserBlock\n\n oauth1_applications: Mapped[list['OAuth1Application']] = relationship(\n back_populates='user',\n order_by='OAuth1Application.id.asc()',\n lazy='raise',\n )\n oauth2_applications: Mapped[list['OAuth2Application']] = relationship(\n back_populates='user',\n order_by='OAuth2Application.id.asc()',\n lazy='raise',\n )\n user_blocks_given: Mapped[list['UserBlock']] = relationship(\n back_populates='from_user',\n order_by='UserBlock.id.desc()',\n lazy='raise',\n )\n user_blocks_received: Mapped[list['UserBlock']] = relationship(\n back_populates='to_user',\n order_by='UserBlock.id.desc()',\n lazy='raise',\n )\n active_user_blocks_received: 
Mapped[list['UserBlock']] = relationship(\n back_populates='to_user',\n order_by='UserBlock.id.desc()',\n lazy='raise',\n primaryjoin='and_(UserBlock.to_user_id == User.id, UserBlock.expired == false())',\n viewonly=True,\n )\n\n __table_args__ = (\n UniqueConstraint(email),\n UniqueConstraint(display_name),\n )\n\n @validates('languages')\n def validate_languages(self, _: str, value: Sequence[str]):\n if len(value) > USER_LANGUAGES_LIMIT:\n raise ValueError('Too many languages')\n return value\n\n @validates('description')\n def validate_description(self, _: str, value: str):\n if len(value) > USER_DESCRIPTION_MAX_LENGTH:\n raise ValueError('Description is too long')\n return value\n\n @property\n def is_administrator(self) -> bool:\n \"\"\"\n Check if the user is an administrator.\n \"\"\"\n\n return UserRole.administrator in self.roles\n\n @property\n def is_moderator(self) -> bool:\n \"\"\"\n Check if the user is a moderator.\n \"\"\"\n\n return UserRole.moderator in self.roles or self.is_administrator\n\n @property\n def extended_scopes(self) -> Sequence[ExtendedScope]:\n \"\"\"\n Get the user's extended scopes.\n \"\"\"\n\n result = []\n\n # role-specific scopes\n if self.is_administrator:\n result.append(ExtendedScope.role_administrator)\n if self.is_moderator:\n result.append(ExtendedScope.role_moderator)\n\n return result\n\n @property\n def permalink(self) -> str:\n \"\"\"\n Get the user's permalink.\n\n >>> user.permalink\n 'https://www.openstreetmap.org/user/permalink/123456'\n \"\"\"\n\n return f'{APP_URL}/user/permalink/{self.id}'\n\n @property\n def languages_str(self) -> str:\n return ' '.join(self.languages)\n\n @languages_str.setter\n def languages_str(self, s: str) -> None:\n languages = s.split()\n languages = (t.strip()[:LANGUAGE_CODE_MAX_LENGTH].strip() for t in languages)\n languages = (normalize_language_case(t) for t in languages)\n languages = (t for t in languages if t)\n self.languages = tuple(set(languages))\n\n @property\n def preferred_diary_language(self) -> LanguageInfo:\n \"\"\"\n Get the user's preferred diary language.\n \"\"\"\n\n # return the first valid language\n for code in self.languages:\n if lang := get_language_info(code):\n return lang\n\n # fallback to default\n return get_language_info(DEFAULT_LANGUAGE)\n\n @property\n def changeset_max_size(self) -> int:\n \"\"\"\n Get the maximum changeset size for this user.\n \"\"\"\n\n return UserRole.get_changeset_max_size(self.roles)\n\n @property\n def password_hasher(self) -> PasswordHash:\n \"\"\"\n Get the password hash class for this user.\n \"\"\"\n\n return PasswordHash(UserRole.get_password_hasher(self.roles))\n\n @property\n def avatar_url(self) -> str:\n \"\"\"\n Get the url for the user's avatar image.\n \"\"\"\n\n # when using gravatar, use user id as the avatar id\n if self.avatar_type == AvatarType.gravatar:\n return Avatar.get_url(self.avatar_type, self.id)\n else:\n return Avatar.get_url(self.avatar_type, self.avatar_id)\n\n async def home_distance_to(self, point: Point | None) -> float | None:\n return haversine_distance(self.home_point, point) if self.home_point and point else None" }, { "identifier": "ExtendedScope", "path": "src/models/scope.py", "snippet": "class ExtendedScope(BaseEnum):\n \"\"\"\n Extended scopes with entries that are not obtainable by normal means.\n \"\"\"\n\n read_prefs = 'read_prefs'\n write_prefs = 'write_prefs'\n write_diary = 'write_diary'\n write_api = 'write_api'\n read_gpx = 'read_gpx'\n write_gpx = 'write_gpx'\n write_notes = 'write_notes'\n\n # 
additional scopes\n read_email = 'read_email'\n skip_authorization = 'skip_authorization'\n\n web_user = 'web_user'\n\n # role-specific scopes\n role_moderator = 'role_moderator'\n role_administrator = 'role_administrator'" }, { "identifier": "Scope", "path": "src/models/scope.py", "snippet": "class Scope(BaseEnum):\n read_prefs = 'read_prefs'\n write_prefs = 'write_prefs'\n write_diary = 'write_diary'\n write_api = 'write_api'\n read_gpx = 'read_gpx'\n write_gpx = 'write_gpx'\n write_notes = 'write_notes'" }, { "identifier": "ChangesetCommentService", "path": "src/services/changeset_comment_service.py", "snippet": "class ChangesetCommentService:\n @staticmethod\n async def subscribe(changeset_id: int) -> Changeset:\n \"\"\"\n Subscribe current user to changeset discussion.\n \"\"\"\n\n try:\n async with DB() as session:\n changeset = await session.get(\n Changeset,\n changeset_id,\n options=[\n joinedload(Changeset.changeset_subscription_users),\n get_joinedload(),\n ],\n )\n\n if not changeset:\n raise_for().changeset_not_found(changeset_id)\n\n changeset.changeset_subscription_users.append(auth_user())\n\n except IntegrityError:\n raise_for().changeset_already_subscribed(changeset_id)\n\n return changeset\n\n @staticmethod\n async def unsubscribe(changeset_id: int) -> Changeset:\n \"\"\"\n Unsubscribe current user from changeset discussion.\n \"\"\"\n\n async with DB() as session:\n changeset = await session.get(\n Changeset,\n changeset_id,\n options=[\n joinedload(Changeset.changeset_subscription_users),\n get_joinedload(),\n ],\n )\n\n if not changeset:\n raise_for().changeset_not_found(changeset_id)\n\n # TODO: will this work?\n try:\n changeset.changeset_subscription_users.remove(auth_user())\n except ValueError:\n raise_for().changeset_not_subscribed(changeset_id)\n\n return changeset\n\n @staticmethod\n async def comment(changeset_id: int, text: str) -> Changeset:\n \"\"\"\n Comment on a changeset.\n \"\"\"\n\n async with DB() as session:\n changeset = await session.get(\n Changeset,\n changeset_id,\n options=[\n joinedload(Changeset.comments),\n get_joinedload(),\n ],\n )\n\n if not changeset:\n raise_for().changeset_not_found(changeset_id)\n if not changeset.closed_at:\n raise_for().changeset_not_closed(changeset_id)\n\n changeset.comments.append(\n ChangesetComment(\n user_id=auth_user().id,\n changeset_id=changeset_id,\n body=text,\n )\n )\n\n return changeset\n\n @staticmethod\n async def delete_comment_unsafe(comment_id: int) -> Changeset:\n \"\"\"\n Delete any changeset comment.\n \"\"\"\n\n async with DB() as session, session.begin():\n comment = await session.get(\n ChangesetComment,\n comment_id,\n with_for_update=True,\n )\n\n if not comment:\n raise_for().changeset_comment_not_found(comment_id)\n\n await session.delete(comment)\n await session.flush()\n\n changeset = await session.get(\n Changeset,\n comment.changeset_id,\n options=[get_joinedload()],\n )\n\n return changeset" } ]
from typing import Annotated from fastapi import APIRouter, Form from pydantic import PositiveInt from src.lib.auth import api_user from src.lib.format.format06 import Format06 from src.limits import CHANGESET_COMMENT_BODY_MAX_LENGTH from src.models.db.user import User from src.models.scope import ExtendedScope, Scope from src.services.changeset_comment_service import ChangesetCommentService
11,085
router = APIRouter() @router.post('/changeset/{changeset_id}/subscribe') async def changeset_subscribe( changeset_id: PositiveInt, _: Annotated[User, api_user(Scope.write_api)], ) -> dict: changeset = await ChangesetCommentService.subscribe(changeset_id)
router = APIRouter() @router.post('/changeset/{changeset_id}/subscribe') async def changeset_subscribe( changeset_id: PositiveInt, _: Annotated[User, api_user(Scope.write_api)], ) -> dict: changeset = await ChangesetCommentService.subscribe(changeset_id)
return Format06.encode_changeset(changeset)
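Read together with the line directly above, the handler in this row's cropped code becomes a complete FastAPI endpoint. Assembled and lightly reformatted (reusing only the imports already listed earlier in the row), it would look roughly like this:

from typing import Annotated
from fastapi import APIRouter
from pydantic import PositiveInt
from src.lib.auth import api_user
from src.lib.format.format06 import Format06
from src.models.db.user import User
from src.models.scope import Scope
from src.services.changeset_comment_service import ChangesetCommentService

router = APIRouter()

@router.post('/changeset/{changeset_id}/subscribe')
async def changeset_subscribe(
    changeset_id: PositiveInt,
    _: Annotated[User, api_user(Scope.write_api)],  # require an authenticated user with the write_api scope
) -> dict:
    # subscribe the current user to the changeset discussion, then serialize it in 0.6 API format
    changeset = await ChangesetCommentService.subscribe(changeset_id)
    return Format06.encode_changeset(changeset)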
1
2023-11-04 01:12:13+00:00
16k
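One convention worth noting about the Format06 helpers quoted in this row's context: keys prefixed with '@' become XML attributes when the response is serialized as XML, while JSON output keeps plain keys and native structures. A tiny illustration of the tag-encoding half of that convention (format_json is passed explicitly here as an assumption; the real helper consults the request's negotiated format):

def encode_tags(tags: dict, format_json: bool):
    # JSON keeps the mapping as-is; XML expects a sequence of {'@k': ..., '@v': ...} attribute dicts
    if format_json:
        return tags
    return tuple({'@k': k, '@v': v} for k, v in tags.items())

print(encode_tags({'created_by': 'iD', 'comment': 'fix typo'}, format_json=True))
# {'created_by': 'iD', 'comment': 'fix typo'}
print(encode_tags({'created_by': 'iD', 'comment': 'fix typo'}, format_json=False))
# ({'@k': 'created_by', '@v': 'iD'}, {'@k': 'comment', '@v': 'fix typo'})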
codefuse-ai/Collinear-Constrained-Attention
train/trainer/atorch_trainer.py
[ { "identifier": "print_rank_0", "path": "utils/common_utils.py", "snippet": "TASK2ID = {}\nID2TASK = {}\n L = args.num_hidden_layers\n V = args.vocab_size\ndef get_rank():\ndef get_local_rank():\ndef is_main_process():\ndef is_local_main_process():\ndef print_rank_0(*message):\ndef get_world_size():\ndef wait_for_everyone():\ndef atorch_init_distributed(backend=\"nccl\"):\ndef atorch_reset_distributed():\ndef _goes_first(is_main):\ndef get_model_params_num(model):\ndef main_process_first():\ndef unwrap_model(model):\ndef honor_type(obj, generator):\ndef recursively_apply(\n func,\n data,\n *args,\n test_type=lambda t: isinstance(t, torch.Tensor),\n error_on_other_type=False,\n **kwargs,\n):\ndef gather(tensor):\n def _gpu_gather_one(tensor):\ndef save_ckpt(model, optimizer, lr_scheduler, epoch, steps, save_path, logger):\ndef scheduler_and_resume(args, train_dataloader, model, optimizer, checkpoint):\ndef get_computation_speed(batch_size_per_device, seq_len, step_time):\ndef human_readable_flops(num):\ndef get_tflops_new(args, batch_size, seq_len, step_time):\ndef get_tflops_megatron(total_model_param, hidden_size, num_hidden_layers, \n batch_size_per_device, seq_len, step_time):\ndef is_old_version(path):\ndef generate_task_id(data_paths, train_mode):\n def __init__(self, patience=7, verbose=False, delta=0):\n def __call__(self, val_loss, model):\n def save_checkpoint(self, val_loss, model):\nclass EarlyStopping:" }, { "identifier": "FAMO", "path": "utils/auto_accelerate_utils.py", "snippet": "class FAMO:\n \"\"\"\n Fast Adaptive Multitask Optimization.\n \"\"\"\n def __init__(\n self,\n n_tasks: int,\n device: torch.device,\n mode: str = 'famo_valid',\n gamma: float = 0.001, # the regularization coefficient, default: 0.001\n w_lr: float = 0.025, # the learning rate of the task logits, default: 0.025\n max_norm: float = 1.0, # the maximum gradient norm\n ):\n self.min_losses = torch.zeros(n_tasks).to(device)\n self.w = torch.tensor([0.0] * n_tasks, device=device, requires_grad=True)\n self.w_opt = torch.optim.Adam([self.w], lr=w_lr, weight_decay=gamma)\n self.max_norm = max_norm\n self.n_tasks = n_tasks\n self.device = device\n self.first_train_step = True\n self.first_valid_step = True\n self.print_loss = None\n self.mode = mode\n self.prev_train_loss = None\n self.prev_valid_loss = None\n self.ratio_valid_task_loss_prev = torch.zeros(len(ID2TASK)).to(device)\n self.global_steps = 0\n self.z = None\n \n def set_min_losses(self, losses):\n self.min_losses = losses\n\n def get_weighted_loss(self, losses):\n self.prev_train_loss = losses\n self.z = F.softmax(self.w * 1, -1)\n # if is_main_process() and (self.global_steps % 10 == 0):\n # logger.info(f\"complete_steps: {self.global_steps}, per_task_weight: {self.z}\")\n if -1e20 in self.ratio_valid_task_loss_prev and self.mode == 'famo_valid_ema':\n self.z = F.softmax(torch.where(self.ratio_valid_task_loss_prev == -1e20, -1e20, self.z), -1)\n if self.global_steps % 10 == 0:\n print_rank_0(f'ratio_valid_task_loss_prev is {self.ratio_valid_task_loss_prev}, after, z is {self.z}')\n D = losses - self.min_losses + 1e-8\n if self.mode.startswith('famo_train'):\n c = (self.z / D).sum().detach()\n loss = (D.log() * self.z / c).sum()\n else:\n loss = (D * self.z).sum()\n return loss\n\n def update(self, curr_loss):\n if self.mode.startswith('famo_valid') and self.first_valid_step:\n self.first_valid_step = False\n self.prev_valid_loss = curr_loss\n return\n if self.mode.startswith('famo_train'):\n prev_loss = self.prev_train_loss\n else:\n prev_loss 
= self.prev_valid_loss\n self.prev_valid_loss = curr_loss\n delta = (prev_loss - self.min_losses + 1e-8).log() - \\\n (curr_loss - self.min_losses + 1e-8).log()\n with torch.enable_grad():\n d = torch.autograd.grad(F.softmax(self.w, -1),\n self.w,\n grad_outputs=delta.detach())[0]\n self.w_opt.zero_grad()\n self.w.grad = d\n self.w_opt.step()\n\n def backward(\n self,\n losses: torch.Tensor,\n shared_parameters: Union[\n List[torch.nn.parameter.Parameter], torch.Tensor\n ] = None,\n ):\n \"\"\"\n Parameters\n ----------\n losses :\n shared_parameters :\n task_specific_parameters :\n last_shared_parameters : parameters of last shared layer/block\n Returns\n -------\n Loss, extra outputs\n \"\"\"\n loss = self.get_weighted_loss(losses=losses)\n # if self.max_norm > 0 and shared_parameters is not None:\n # torch.nn.utils.clip_grad_norm_(shared_parameters, self.max_norm)\n # loss.backward()\n return loss" }, { "identifier": "get_ltor_masks_and_position_ids", "path": "utils/auto_accelerate_utils.py", "snippet": "def get_ltor_masks_and_position_ids(data):\n \"\"\"Build masks and position id for left to right model.\"\"\"\n\n # Extract batch size and sequence length.\n batch_size, seq_length = data.size()\n\n # Attention mask (lower triangular).\n # attention_mask = get_attn_mask(\n # seq_length=seq_length,\n # device=data.device,\n # )\n attention_mask = torch.ones((batch_size, seq_length), device=data.device)\n\n # Position ids.\n position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)\n position_ids = position_ids.unsqueeze(0).expand_as(data).clone()\n\n return attention_mask, position_ids" }, { "identifier": "SelfPacedStatus", "path": "utils/auto_accelerate_utils.py", "snippet": "class SelfPacedStatus:\n def __init__(self, interval=20):\n super(SelfPacedStatus, self).__init__()\n self.complete_steps = None\n self.current_epoch = None\n self.mode = None\n self.task_loss_prev = None\n self.w = None\n self.interval = interval\n \n def update(self, complete_steps, current_epoch, mode, task_loss_prev):\n self.complete_steps = complete_steps\n self.current_epoch = current_epoch\n self.mode = mode\n self.task_loss_prev = task_loss_prev" }, { "identifier": "GPTNeoXLayer", "path": "model/gpt_neox/modeling_gpt_neox.py", "snippet": "class GPTNeoXLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_parallel_residual = config.use_parallel_residual\n self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.attention = GPTNeoXAttention(config)\n self.mlp = GPTNeoXMLP(config)\n\n def forward(\n self,\n hidden_states: Optional[torch.FloatTensor],\n attention_mask: Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = False,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n ):\n\n attention_layer_outputs = self.attention(\n self.input_layernorm(hidden_states),\n attention_mask=attention_mask,\n position_ids=position_ids,\n layer_past=layer_past,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n attn_output = attention_layer_outputs[0] # output_attn: attn_output, present, (attn_weights)\n outputs = attention_layer_outputs[1:]\n\n if self.use_parallel_residual:\n # pseudocode:\n # x = x + attn(ln1(x)) + mlp(ln2(x))\n mlp_output = 
self.mlp(self.post_attention_layernorm(hidden_states))\n hidden_states = mlp_output + attn_output + hidden_states\n else:\n # pseudocode:\n # x = x + attn(ln1(x))\n # x = x + mlp(ln2(x))\n attn_output = attn_output + hidden_states\n mlp_output = self.mlp(self.post_attention_layernorm(attn_output))\n hidden_states = mlp_output + attn_output\n\n if use_cache:\n outputs = (hidden_states,) + outputs # hidden_states, present, (attn_weights)\n else:\n outputs = (hidden_states,) + outputs[1:] # hidden_states, (attn_weights)\n\n return outputs" }, { "identifier": "GPTNeoXAttention", "path": "model/gpt_neox/modeling_gpt_neox.py", "snippet": "class GPTNeoXAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.num_attention_heads = config.num_attention_heads\n self.hidden_size = config.hidden_size\n if self.hidden_size % self.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size is not divisble by the number of attention heads! Make sure to update them\"\n )\n self.head_size = self.hidden_size // self.num_attention_heads\n self.rotary_ndims = int(self.head_size * config.rotary_pct)\n self._init_bias(config.max_position_embeddings)\n self.register_buffer(\"masked_bias\", torch.tensor(-1e9), persistent=False)\n self._init_rope()\n self.register_buffer(\n \"norm_factor\",\n torch.sqrt(torch.tensor(self.head_size, dtype=torch.float32)).to(torch.get_default_dtype()),\n persistent=False,\n )\n self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size)\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n\n def _init_bias(self, max_positions, device=None):\n self.register_buffer(\n \"bias\",\n torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(\n 1, 1, max_positions, max_positions\n ),\n persistent=False,\n )\n if device is not None:\n self.bias = self.bias.to(device)\n\n def _init_rope(self):\n if self.config.rope_scaling is None:\n self.rotary_emb = GPTNeoXRotaryEmbedding(\n self.rotary_ndims, self.config.max_position_embeddings, base=self.config.rotary_emb_base\n )\n else:\n scaling_type = self.config.rope_scaling[\"type\"]\n scaling_factor = self.config.rope_scaling[\"factor\"]\n if scaling_type == \"linear\":\n self.rotary_emb = GPTNeoXLinearScalingRotaryEmbedding(\n self.rotary_ndims,\n self.config.max_position_embeddings,\n base=self.config.rotary_emb_base,\n scaling_factor=scaling_factor,\n )\n elif scaling_type == \"dynamic\":\n self.rotary_emb = GPTNeoXDynamicNTKScalingRotaryEmbedding(\n self.rotary_ndims,\n self.config.max_position_embeddings,\n base=self.config.rotary_emb_base,\n scaling_factor=scaling_factor,\n )\n else:\n raise ValueError(f\"Unknown RoPE scaling type {scaling_type}\")\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: torch.FloatTensor,\n position_ids: torch.LongTensor,\n head_mask: Optional[torch.FloatTensor] = None,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n use_cache: Optional[bool] = False,\n output_attentions: Optional[bool] = False,\n ):\n has_layer_past = layer_past is not None\n\n # Compute QKV\n # Attention heads [batch, seq_len, hidden_size]\n # --> [batch, seq_len, (np * 3 * head_size)]\n qkv = self.query_key_value(hidden_states)\n\n # [batch, seq_len, (num_heads * 3 * head_size)]\n # --> [batch, seq_len, num_heads, 3 * head_size]\n new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size)\n qkv = qkv.view(*new_qkv_shape)\n\n # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 
[batch, num_attention_heads, seq_len, head_size]\n query = qkv[..., : self.head_size].permute(0, 2, 1, 3)\n t_layer = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3)\n value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3)\n\n t_layer_1 = t_layer[..., : t_layer.shape[-1] // 2]\n t_layer_2 = t_layer[..., t_layer.shape[-1] // 2 :]\n t_layer = (t_layer_1+t_layer_2)/2\n\n t_layer = F.relu(t_layer)\n\n t_layer = torch.cat((t_layer, t_layer), dim=-1)\n\n # Compute rotary embeddings on rotary_ndims\n query_rot = query[..., : self.rotary_ndims]\n query_pass = query[..., self.rotary_ndims :]\n t_rot = t_layer[..., : self.rotary_ndims]\n t_pass = t_layer[..., self.rotary_ndims :]\n\n # Compute token offset for rotary embeddings (when decoding)\n seq_len = t_layer.shape[-2]\n if has_layer_past:\n seq_len += layer_past[0].shape[-2]\n cos, sin = self.rotary_emb(value, seq_len=seq_len)\n query_rot, t_layer = apply_rotary_pos_emb(query_rot, t_rot, cos, sin, position_ids)\n query_rot = torch.cat((query_rot, query_pass), dim=-1)\n t_layer = torch.cat((t_layer, t_pass), dim=-1)\n\n # Cache QKV values\n if has_layer_past:\n past_t = layer_past[0]\n past_value = layer_past[1]\n t_layer = torch.cat((past_t, t_layer), dim=-2)\n value = torch.cat((past_value, value), dim=-2)\n present = (t_layer, value) if use_cache else None\n\n # Compute attention\n attn_output, attn_weights = self._attn(query, t_layer, query_rot, value, attention_mask, head_mask)\n\n # Reshape outputs\n attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size)\n attn_output = self.dense(attn_output)\n\n outputs = (attn_output, present)\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n @classmethod\n def _split_heads(cls, tensor, num_attention_heads, attn_head_size):\n \"\"\"\n Splits hidden dim into attn_head_size and num_attention_heads\n \"\"\"\n # tensor: [bs, seq_len, hidden_size]\n new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)\n # -> [bs, seq_len, num_attention_heads, attn_head_size]\n tensor = tensor.view(new_shape)\n # -> [bs, num_attention_heads, seq_len, attn_head_size]\n tensor = tensor.permute(0, 2, 1, 3)\n return tensor\n\n @classmethod\n def _merge_heads(cls, tensor, num_attention_heads, attn_head_size):\n \"\"\"\n Merges attn_head_size dim and num_attn_heads dim into hidden dim\n \"\"\"\n # tensor [bs, num_attention_heads, seq_len, attn_head_size]\n tensor = tensor.permute(0, 2, 1, 3).contiguous()\n # -> [bs, seq_len, num_attention_heads, attn_head_size]\n tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size)\n # -> [bs, seq_len, hidden_size]\n return tensor\n\n def _attn(self, query, t_layer, query_rot, value, attention_mask=None, head_mask=None):\n # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size]\n # compute causal mask from causal mask buffer\n batch_size, num_attention_heads, query_length, attn_head_size = query.size()\n key_length = t_layer.size(-2)\n\n # dynamically increase the causal mask with the key length, if needed.\n if key_length > self.bias.shape[-1]:\n self._init_bias(key_length, device=t_layer.device)\n causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]\n\n # query = query.view(batch_size * num_attention_heads, query_length, attn_head_size)\n # key = key.view(batch_size * num_attention_heads, key_length, attn_head_size)\n # attn_scores = torch.zeros(\n # batch_size * num_attention_heads,\n # query_length,\n # key_length,\n # 
dtype=query.dtype,\n # device=key.device,\n # )\n # attn_scores = torch.baddbmm(\n # attn_scores,\n # query,\n # key.transpose(1, 2),\n # beta=1.0,\n # alpha=(torch.tensor(1.0, dtype=self.norm_factor.dtype, device=self.norm_factor.device) / self.norm_factor),\n # )\n # attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length)\n\n # print(query.shape)\n # print(t_layer.shape)\n # print(query_rot.shape)\n\n attn_scores = contract(\n # 'nbpd,sbpd,nbpd->bpns',\n 'bpnd,bpsd,bpnd->bpns',\n query, # [sq, b, np, hn] [b,np,sq,hn]\n t_layer, #[sk, b, np, hn] [b,np,sk,hn]\n query_rot, # [sq, b, np, hn] [b,np,sq,hn]\n backend='torch'\n ) / self.norm_factor\n\n mask_value = torch.finfo(attn_scores.dtype).min\n # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.\n # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`\n mask_value = torch.tensor(mask_value, dtype=attn_scores.dtype).to(attn_scores.device)\n attn_scores = torch.where(causal_mask, attn_scores, mask_value)\n\n if attention_mask is not None:\n # Apply the attention mask\n attn_scores = attn_scores + attention_mask\n\n attn_weights = nn.functional.softmax(attn_scores, dim=-1)\n attn_weights = attn_weights.to(value.dtype)\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n attn_output = torch.matmul(attn_weights, value)\n return attn_output, attn_weights" }, { "identifier": "GPTNeoXMLP", "path": "model/gpt_neox/modeling_gpt_neox.py", "snippet": "class GPTNeoXMLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)\n self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)\n self.act = ACT2FN[config.hidden_act]\n\n def forward(self, hidden_states):\n hidden_states = self.dense_h_to_4h(hidden_states)\n hidden_states = self.act(hidden_states)\n hidden_states = self.dense_4h_to_h(hidden_states)\n return hidden_states" }, { "identifier": "LlamaDecoderLayer", "path": "model/llama/modeling_llama.py", "snippet": "class LlamaDecoderLayer(nn.Module):\n def __init__(self, config: LlamaConfig):\n super().__init__()\n self.hidden_size = config.hidden_size\n self.self_attn = LlamaAttention(config=config)\n self.mlp = LlamaMLP(config)\n self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = False,\n ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n \"\"\"\n Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under\n returned tensors for more detail.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states\n \"\"\"\n\n residual = hidden_states\n\n hidden_states = self.input_layernorm(hidden_states)\n\n # Self Attention\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n use_cache=use_cache,\n )\n hidden_states = residual + hidden_states\n\n # Fully Connected\n residual = hidden_states\n hidden_states = self.post_attention_layernorm(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = residual + hidden_states\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (self_attn_weights,)\n\n if use_cache:\n outputs += (present_key_value,)\n\n return outputs" }, { "identifier": "LlamaAttention", "path": "model/llama/modeling_llama.py", "snippet": "class LlamaAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(self, config: LlamaConfig):\n super().__init__()\n self.config = config\n self.hidden_size = config.hidden_size\n self.num_heads = config.num_attention_heads\n self.head_dim = self.hidden_size // self.num_heads\n self.num_key_value_heads = config.num_key_value_heads\n self.num_key_value_groups = self.num_heads // self.num_key_value_heads\n self.max_position_embeddings = config.max_position_embeddings\n\n #20230803 T需要保持非负\n self.relu = ACT2FN['relu']\n\n if (self.head_dim * self.num_heads) != self.hidden_size:\n raise ValueError(\n f\"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}\"\n f\" and `num_heads`: {self.num_heads}).\"\n )\n self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)\n #20230803 K改为T\n self.t_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)\n # self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)\n self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)\n self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)\n self._init_rope()\n\n def _init_rope(self):\n if self.config.rope_scaling is None:\n self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)\n else:\n scaling_type = self.config.rope_scaling[\"type\"]\n scaling_factor = self.config.rope_scaling[\"factor\"]\n if scaling_type == \"linear\":\n self.rotary_emb = LlamaLinearScalingRotaryEmbedding(\n self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor\n )\n elif scaling_type == \"dynamic\":\n self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(\n self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor\n )\n else:\n raise ValueError(f\"Unknown RoPE scaling type {scaling_type}\")\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: 
Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n bsz, q_len, _ = hidden_states.size()\n\n # todo tp>1\n if self.config.pretraining_tp > 1:\n key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp\n query_slices = self.q_proj.weight.split(\n (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0\n )\n key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)\n value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)\n\n query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]\n query_states = torch.cat(query_states, dim=-1)\n\n key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]\n key_states = torch.cat(key_states, dim=-1)\n\n value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]\n value_states = torch.cat(value_states, dim=-1)\n\n else:\n query_states = self.q_proj(hidden_states)\n #20230803 K改为T\n t_states = self.t_proj(hidden_states)\n # key_states = self.k_proj(hidden_states)\n value_states = self.v_proj(hidden_states)\n\n query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)\n\n #20230803 T的定义\n t_states = t_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n t_states_1 = t_states[..., : t_states.shape[-1] // 2]\n t_states_2 = t_states[..., t_states.shape[-1] // 2 :]\n t_states = (t_states_1+t_states_2)/2\n t_states = F.relu(t_states)\n t_states = torch.cat((t_states, t_states), dim=-1)\n\n # key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n\n kv_seq_len = t_states.shape[-2]\n if past_key_value is not None:\n kv_seq_len += past_key_value[0].shape[-2]\n cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)\n query_rot, t_states = apply_rotary_pos_emb(query_states, t_states, cos, sin, position_ids)\n\n if past_key_value is not None:\n # reuse k, v, self_attention\n t_states = torch.cat([past_key_value[0], t_states], dim=2)\n value_states = torch.cat([past_key_value[1], value_states], dim=2)\n\n past_key_value = (t_states, value_states) if use_cache else None\n\n # repeat k/v heads if n_kv_heads < n_heads\n t_states = repeat_kv(t_states, self.num_key_value_groups)\n value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n attn_weights = contract(\n 'bpnd,bpsd,bpnd->bpns',\n query_states, # [b,p,sq,d]\n t_states, # [b,p,sk,d]\n query_rot, # [b,p,sq,d]\n backend='torch'\n ) / math.sqrt(self.head_dim)\n # attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)\n\n if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):\n raise ValueError(\n f\"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is\"\n f\" {attn_weights.size()}\"\n )\n\n if attention_mask is not None:\n if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):\n raise ValueError(\n f\"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}\"\n )\n attn_weights = attn_weights + attention_mask\n\n # upcast attention to fp32\n attn_weights = nn.functional.softmax(attn_weights, dim=-1, 
dtype=torch.float32).to(query_states.dtype)\n attn_output = torch.matmul(attn_weights, value_states)\n\n if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):\n raise ValueError(\n f\"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is\"\n f\" {attn_output.size()}\"\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous()\n attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)\n\n if self.config.pretraining_tp > 1:\n attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)\n o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)\n attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])\n else:\n attn_output = self.o_proj(attn_output)\n\n if not output_attentions:\n attn_weights = None\n\n return attn_output, attn_weights, past_key_value" }, { "identifier": "LlamaMLP", "path": "model/llama/modeling_llama.py", "snippet": "class LlamaMLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.hidden_size = config.hidden_size\n self.intermediate_size = config.intermediate_size\n self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)\n self.act_fn = ACT2FN[config.hidden_act]\n\n def forward(self, x):\n if self.config.pretraining_tp > 1:\n slice = self.intermediate_size // self.config.pretraining_tp\n gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)\n up_proj_slices = self.up_proj.weight.split(slice, dim=0)\n down_proj_slices = self.down_proj.weight.split(slice, dim=1)\n\n gate_proj = torch.cat(\n [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1\n )\n up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)\n\n intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)\n down_proj = [\n F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)\n ]\n down_proj = sum(down_proj)\n else:\n down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))\n\n return down_proj" }, { "identifier": "PeftModel", "path": "model/peft/modeling_peft.py", "snippet": "class AntPeftForCausalLM(PeftModelForCausalLM):\nclass AntPeftForEmbedding(PeftModel):\n def __init__(self, model, peft_config: PeftConfig, adapter_name: str = \"default\"):\n def set_route_id(self, route_id: int):\n def expand_external_router(self, path: str):\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n route_id: int = 0,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n def save_pretrained(self, save_directory, **kwargs):\n def load_adapter(self, model_id, adapter_name, is_trainable=False, **kwargs):\n def from_pretrained(\n cls,\n model,\n model_id: str,\n adapter_name: str = \"default\",\n is_trainable: bool = False,\n resume_from_checkpoint: bool = False,\n **kwargs\n ):\n def __init__(self, model, peft_config: PeftConfig, adapter_name: str = \"default\"):\n def set_route_id(self, route_id: int):\n def expand_external_router(self, path: str):\n def forward(\n self,\n query_ids: torch.Tensor,\n query_position_ids: torch.Tensor = None,\n 
query_attention_mask: torch.Tensor = None,\n query_mask: torch.Tensor = None,\n passage_ids: torch.Tensor = None,\n passage_position_ids: torch.Tensor = None,\n passage_attention_mask: torch.Tensor = None,\n passage_mask: torch.Tensor = None,\n route_id: int = 0,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n def save_pretrained(self, save_directory, **kwargs):\n def load_adapter(self, model_id, adapter_name, is_trainable=False, **kwargs):" } ]
import datetime import json import logging import math import os import random import re import shutil import time import warnings import gc import numpy as np import atorch import torch from functools import partial from pathlib import Path from deepspeed.ops.adam import DeepSpeedCPUAdam from torch.distributed.fsdp import FullStateDictConfig from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import StateDictType from torch.optim.lr_scheduler import LambdaLR, CosineAnnealingLR, CosineAnnealingWarmRestarts from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from torch.utils.tensorboard import SummaryWriter from tqdm.auto import tqdm from transformers import get_scheduler as get_scheduler_trans from transformers.modeling_utils import PreTrainedModel, unwrap_model from transformers.trainer import ( OPTIMIZER_NAME, SCHEDULER_NAME, TRAINER_STATE_NAME, TRAINING_ARGS_NAME ) from transformers.trainer_pt_utils import reissue_pt_warnings from transformers.trainer_utils import ( PREFIX_CHECKPOINT_DIR, ) from transformers.utils import WEIGHTS_NAME from torch.nn import CrossEntropyLoss from utils.common_utils import print_rank_0, get_tflops_megatron, get_computation_speed, TASK2ID, ID2TASK, EarlyStopping, logger from utils.auto_accelerate_utils import FAMO, get_ltor_masks_and_position_ids, SelfPacedStatus from atorch.auto import auto_accelerate from atorch.utils.version import torch_version from model.gpt_neox.modeling_gpt_neox import GPTNeoXLayer, GPTNeoXAttention, GPTNeoXMLP from model.llama.modeling_llama import LlamaDecoderLayer, LlamaAttention, LlamaMLP from model.glm.modeling_glm import GLMBlock from torch.cuda.amp import GradScaler from apex.optimizers import FusedSGD from model.peft.modeling_peft import PeftModel
11792
losses = torch.cat(losses) losses = losses[: len(self.valid_dataset)] mean_loss = torch.mean(losses).item() accumulated_task_loss = torch.tensor(accumulated_task_loss_np).to(self.device) accumulated_task_num = torch.tensor(accumulated_task_num_np).to(self.device) torch.distributed.all_reduce(accumulated_task_loss, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(accumulated_task_num, op=torch.distributed.ReduceOp.SUM) accumulated_task_loss /= torch.distributed.get_world_size() valid_task_loss = accumulated_task_loss / (accumulated_step - 1) logs = {'valid_loss': mean_loss} per_task_valid_loss = {self.ID2TASK[i]+'_loss': valid_task_loss[i].item() for i in range(len(self.ID2TASK))} logs.update(per_task_valid_loss) if is_global_main_process(): logger.info('log point') for i in range(len(self.ID2TASK)): if accumulated_task_num[i] != 0: logger.info(f"{self.ID2TASK[i]}_loss: {valid_task_loss[i]}, sample nums: {accumulated_task_num[i]}") self.log(logs, step=self.global_steps, phase='Evaluation') metrics = {'valid_loss': mean_loss, 'valid_task_loss': valid_task_loss} logger.info(f"Finish evaluation") if self.is_rank0(): print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) return metrics def log(self, logs, step, phase='Train'): if not self.summary_writer: return logger.info(json.dumps(logs)) for key, value in logs.items(): self.summary_writer.add_scalar(f'{phase}/{key}', value, step) def _sorted_checkpoints( self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='([0-9]+)', use_mtime=False ): ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob( f"{checkpoint_prefix}-*") if os.path.isdir(x)] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append( (os.path.getmtime(path), path)) else: regex_match = re.search( f".*{checkpoint_prefix}-({checkpoint_name_pattern})", path) if regex_match is not None and regex_match.groups() is not None: ordering_and_checkpoint_path.append( (int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] # Make sure we don't delete the best model. if self.best_model_checkpoint is not None: best_model_index = checkpoints_sorted.index(str(Path(self.best_model_checkpoint))) # for i in range(best_model_index, len(checkpoints_sorted) - 2): for i in range(best_model_index, len(checkpoints_sorted) - 1): checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i] print_rank_0(f'checkpoints sorted list: {checkpoints_sorted}') return checkpoints_sorted def _rotate_checkpoints( self, use_mtime=False, output_dir=None, prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='.*') -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints( use_mtime=use_mtime, output_dir=output_dir, checkpoint_prefix=prefix, checkpoint_name_pattern=checkpoint_name_pattern) if len(checkpoints_sorted) <= self.args.save_total_limit: return # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. 
save_total_limit = self.args.save_total_limit number_of_checkpoints_to_delete = max( 0, len(checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint, ignore_errors=True) def _clean_atorch_checkpoints(self, output_dir=None, prefix=PREFIX_CHECKPOINT_DIR): # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints( output_dir=output_dir, checkpoint_prefix=prefix, checkpoint_name_pattern='([0-9]+)') # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. for checkpoint in checkpoints_sorted[:-1]: logger.info( f"Deleting older atorch checkpoint [{checkpoint}] due to self.args.save_total_limit") try: os.remove(os.path.join(checkpoint, ATORCH_CHECKPOINT_NAME)) except Exception: continue def _save_peft_model(self, output_dir, state_dict=None): logger.info(f"Start saving peft model to {output_dir}") output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) model = unwrap_model(self.model)
#!/usr/bin/env python # coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. HYPER_PARAMETER_NAME = 'hyper_parameters.json' ATORCH_CHECKPOINT_NAME = 'atorch_checkpoint.bin' EPOCH_CHECKPOINT_NAME = 'epoch' FAMO_CHECKPOINT_NAME = 'famo_checkpoint' EMA_CHECKPOINT_NAME = 'ema_checkpoint' # logger = logging.getLogger(__name__) def is_local_main_process(): return atorch.local_rank() == 0 def is_global_main_process(): return atorch.rank() == 0 def has_inf_or_nan(x): try: # if x is half, the .float() incurs an additional deep copy, but it's necessary if # Pytorch's .sum() creates a one-element tensor of the same type as x # (which is true for some recent version of pytorch). cpu_sum = float(x.float().sum()) # More efficient version that can be used if .sum() returns a Python scalar # cpu_sum = float(x.sum()) except RuntimeError as instance: # We want to check if inst is actually an overflow exception. # RuntimeError could come from a different error. # If so, we still want the exception to propagate. if "value cannot be converted" not in instance.args[0]: raise return True else: if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: return True return False def count_model_params(model): trainable_params = 0 all_params = 0 for param in model.parameters(): num_params = param.numel() all_params += num_params if param.requires_grad: trainable_params += num_params return all_params, trainable_params class AtorchArguments: def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def get_linear_schedule_with_log_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): def lr_lambda(current_step: int): inverse_log_warm_up = 1.0 / math.log(num_warmup_steps) if current_step == 0: return 0.0 if current_step < num_warmup_steps: return inverse_log_warm_up * math.log(current_step) return max( 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) ) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_scheduler(name, optimizer, num_warmup_steps, num_training_steps): scheduler_map = { 'log_warmup_linear_decay': get_linear_schedule_with_log_warmup} try: lr_scheduler = get_scheduler_trans( name, optimizer, num_warmup_steps, num_training_steps) return lr_scheduler except Exception: schedule_func = scheduler_map[name] return schedule_func(optimizer, num_warmup_steps, num_training_steps) class AtorchTrainer: def __init__(self, model, args, train_dataset, valid_dataset, tokenizer=None, callbacks=None, no_save_atorch_checkpoint=None, save_pytorch_model_bin_checkpoint=True, train_peft=False, rank=0, max_shard_size='10GB', files_to_save=None, args_to_save=None, data_collator=None, my_loss_func=None, **kwargs, ): self.args = args self.TASK2ID = TASK2ID self.ID2TASK = ID2TASK print('in atorch trainer') print(TASK2ID) print(ID2TASK) self.model = model self.no_save_atorch_checkpoint = no_save_atorch_checkpoint self.save_pytorch_model_bin_checkpoint = 
save_pytorch_model_bin_checkpoint self.train_peft = train_peft self.rank = rank self.kwargs = kwargs self.train_dataset = train_dataset self.valid_dataset = valid_dataset self.tokenizer = tokenizer self.max_shard_size = max_shard_size self.files_to_save = files_to_save self.args_to_save = args_to_save self.best_metric = None self.best_model_checkpoint = None self.no_save_base_model = True self.device = f"cuda:{atorch.local_rank()}" self.famo = FAMO(n_tasks=len(TASK2ID), device=self.device, mode=self.args.weighted_loss_mode) self.famo_resume = False self.selfpaced_status = SelfPacedStatus(args.selfpaced_interval) self.total_train_batch_size = self.args.per_device_train_batch_size * \ self.args.gradient_accumulation_steps * \ atorch.world_size() self.data_collator = data_collator self.my_loss_func = my_loss_func if self.args.early_stopping_patience > 0: print(f'early_stopping_patience: {self.args.early_stopping_patience}') patience = self.args.early_stopping_patience self.early_stopping = EarlyStopping(patience, verbose=True) self.train_dataloader_args = { "shuffle": True, "batch_size": self.total_train_batch_size, "pin_memory": True, "collate_fn": data_collator, "drop_last": True, "num_workers": self.args.num_workers, # "persistent_workers": args.num_workers > 0, } self.valid_dataloader = DataLoader( valid_dataset, sampler=DistributedSampler(valid_dataset, shuffle=True), batch_size=args.per_device_valid_batch_size, pin_memory=True, collate_fn=data_collator ) self.valid_dataloader_length = len(self.valid_dataloader) if self.args.resume_from_checkpoint == 'true': self.resume_checkpoint_dir = self.get_last_checkpoint( self.args.output_dir) self.atorch_args = AtorchArguments( lr=args.learning_rate, weight_decay=args.weight_decay, adam_eps=args.adam_epsilon, adam_beta1=args.adam_beta1, adam_beta2=args.adam_beta2) self.atorch_init() self.num_update_steps_per_epoch = math.ceil( len(self.train_dataloader) / self.args.gradient_accumulation_steps) print(f'number of update steps per epoch: {self.num_update_steps_per_epoch}') if self.args.max_steps == -1: self.args.max_steps = int( self.args.num_train_epochs * self.num_update_steps_per_epoch) else: self.args.num_train_epochs = math.ceil( self.args.max_steps / self.num_update_steps_per_epoch) # self.args.warmup_steps = self.args.get_warmup_steps( # self.args.max_steps) # 找不到get_warmup_steps custom_lr_scheduler_type = self.kwargs.get( 'custom_lr_scheduler_type', None) self.lr_scheduler = get_scheduler( name=custom_lr_scheduler_type if custom_lr_scheduler_type else self.args.lr_scheduler_type, optimizer=self.optimizer, num_warmup_steps=self.args.num_warmup_steps, num_training_steps=self.args.max_steps, ) print_rank_0(f'lr_scheduler{self.lr_scheduler}') if self.args.resume_from_checkpoint == 'true': with warnings.catch_warnings(record=True): self.lr_scheduler.load_state_dict(torch.load( os.path.join(self.resume_checkpoint_dir, SCHEDULER_NAME))) self._load_rng_state(self.resume_checkpoint_dir) torch.distributed.barrier() now_datetime = datetime.datetime.now() timestr = datetime.datetime.strftime(now_datetime, '%Y%m%d-%H%M%S') self.log_dir = os.path.join(self.args.output_dir, 'runs', timestr) self.summary_writer = None if torch.distributed.get_rank() == 0: self.summary_writer = SummaryWriter(log_dir=self.log_dir) def get_last_checkpoint(self, folder): _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)") content = sorted(os.listdir(folder)) checkpoints = [ path for path in content if _re_checkpoint.search(path) is not None and 
os.path.isdir(os.path.join(folder, path)) ] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) def _load_rng_state(self, resume_checkpoint_dir): # Load RNG states from `checkpoint` if resume_checkpoint_dir is None: return if self.args.world_size > 1: rng_file = os.path.join( resume_checkpoint_dir, f"rng_state_{self.rank}.pth") if not os.path.isfile(rng_file): logger.info( f"Didn't find an RNG file for process {self.rnak}, if you are resuming a training that " "wasn't launched in a distributed fashion, reproducibility is not guaranteed." ) return else: rng_file = os.path.join(resume_checkpoint_dir, "rng_state.pth") if not os.path.isfile(rng_file): logger.info( "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " "fashion, reproducibility is not guaranteed." ) return checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) if torch.cuda.is_available(): if self.args.local_rank != -1: torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) else: try: torch.cuda.random.set_rng_state_all( checkpoint_rng_state["cuda"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) def load_atorch_model_state(self, model_state_dict, **kwargs): print('resume atorch model state') if self.is_rank0(): self.model.load_state_dict(model_state_dict) # 在 rank 0 加载完毕后,再通过sync_module_states分发参数 torch.distributed.barrier() # self.model = FSDP(self.model, sync_module_states=True, **kwargs) def load_atorch_optim_state(self, optim_state_dict): print('resume optimizer state') optim_state_dict = FSDP.scatter_full_optim_state_dict( optim_state_dict, self.model) # may be removed after PyTorch 2.2 def move_optim_state_to_cpu(optim_state_dict): for k in optim_state_dict: if isinstance(optim_state_dict[k], torch.Tensor): optim_state_dict[k] = optim_state_dict[k].cpu() elif isinstance(optim_state_dict[k], dict): move_optim_state_to_cpu(optim_state_dict[k]) move_optim_state_to_cpu(optim_state_dict) self.optimizer.load_state_dict(optim_state_dict) def load_famo_state(self): print_rank_0(f'loading famo checkpoint') self.famo_resume = True famo_dir = os.path.join(self.resume_checkpoint_dir, 'famo_checkpoint/') if not os.path.exists(famo_dir): print_rank_0(f'can not find the famo checkpoint dir!') else: famo_state_name = FAMO_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' famo_checkpoint_state = torch.load(os.path.join(famo_dir, famo_state_name)) w_opt_state = famo_checkpoint_state['w_opt_state'] self.famo.prev_train_loss = famo_checkpoint_state['prev_train_loss'].to(self.famo.device) self.famo.prev_valid_loss = famo_checkpoint_state['prev_valid_loss'].to(self.famo.device) self.famo.first_train_step = famo_checkpoint_state['first_train_step'] self.famo.first_valid_step = famo_checkpoint_state['first_valid_step'] self.famo.ratio_valid_task_loss_prev = famo_checkpoint_state['ratio_valid_task_loss_prev'].to(self.famo.device) self.famo.w = famo_checkpoint_state['w'].to(self.famo.device) self.famo.w_opt.load_state_dict(w_opt_state) print_rank_0(f'prev_train_loss: {self.famo.prev_train_loss}') print_rank_0(f'prev_valid_loss: {self.famo.prev_valid_loss}') print_rank_0(f'first_train_step: {self.famo.first_train_step}') 
print_rank_0(f'first_valid_step: {self.famo.first_valid_step}') print_rank_0(f'ratio_valid_task_loss_prev: {self.famo.ratio_valid_task_loss_prev}') print_rank_0(f'w: {self.famo.w}') print_rank_0(f'load famo checkpoint successfully') def atorch_init(self): assert torch_version() >= (2, 0, 0), "use pt2.0 for use orig param if fsdp" if self.args.model_type == 'gpt_neox': # wrap_class = (GPTNeoXAttention, GPTNeoXMLP) wrap_class = (GPTNeoXLayer,) elif self.args.model_type == 'llama': # wrap_class = (LlamaAttention, LlamaMLP) wrap_class = (LlamaDecoderLayer,) elif self.args.model_type == 'glm': wrap_class = (GLMBlock,) parallel_mode = [] if self.args.dp: # p_mode = ([("data", torch.distributed.get_world_size())], None) parallel_mode.append(("data", self.args.dp)) if self.args.tp: parallel_mode.append(("tensor_parallel", self.args.tp)) strategy = [ # ("parallel_mode", p_mode), ("parallel_mode", (parallel_mode, None)), "module_replace", # ("fsdp", fsdp_config), # ("amp_native", {"dtype": torch.bfloat16}) if self.args.bf16 else "amp_native", # ("checkpoint", wrap_class), ] if self.args.peft_type is None or self.args.peft_type == 'lora': cpu_offload = False if self.args.total_model_param < 1e9 else True fsdp_config = { "atorch_wrap_cls": wrap_class, "sync_module_states": True, "use_orig_params": True, "limit_all_gathers": True, # "cpu_offload": True, } print(fsdp_config) fsdp_opt = ("fsdp", fsdp_config) strategy.append(fsdp_opt) self.args.atorch_opt = "fsdp" else: num_all_params, num_trainable_params = count_model_params(self.model) if num_all_params < 11e9 or self.args.peft_type == "qlora": # For GLM-10B logger.info( f"Found using {self.args.peft_type} method. The peft model has {num_all_params} and only " f"{num_trainable_params} params are trainable({100 * num_trainable_params / num_all_params}%)" ". Set atorch opt to DistributedDataParallel.") self.args.atorch_opt = "ddp" if self.args.bf16 or self.args.fp16: if self.args.bf16: amp_config = {"dtype": torch.bfloat16, "skip_if_nonfinite": True} # amp_config = {"dtype": torch.bfloat16} if self.args.peft_type == "qlora": # The dtype of grads is bf16 when using qlora # atorch scaler does not support bf16 grads. 
amp_config["skip_if_nonfinite"] = False elif self.args.fp16: amp_config = {"dtype": torch.float16} strategy.append(("amp_native", amp_config)) # strategy.append(("half", "bf16")) if self.args.checkpoint_activations: strategy.append(("checkpoint", wrap_class)) print(f"Manually loaded auto acc strategy: {strategy}") def prepare_input(batch, device): # DEBUG: GLM NoneType batch = {k: v.to(device=device, non_blocking=True) if v is not None else None for k, v in batch.items()} return batch def optim_param_func(model, args): no_decay = ["bias", "LayerNorm.weight", "layernorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.0, }, ] return optimizer_grouped_parameters # load fsdp checkpoint参数 if self.args.resume_from_checkpoint == 'true': logger.info(f'Resume training from {self.resume_checkpoint_dir}') if self.is_rank0(): sd = torch.load(os.path.join( self.resume_checkpoint_dir, ATORCH_CHECKPOINT_NAME), map_location='cpu') model_state_dict, optim_state_dict = sd['model_state_dict'], sd['optimizer_state_dict'] else: model_state_dict, optim_state_dict = None, None torch.distributed.barrier() # other rank waiting ########## self.load_atorch_model_state(model_state_dict) ########## if self.is_rank0(): print(f'GPU mem before fsdp:') print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) optim_func = torch.optim.AdamW print(f'optimizer before fsdp: {optim_func}') ddp_find_unused_parameters = None if self.args.atorch_opt == "ddp" and not (self.args.peft_type in ["lora", "qlora"] and self.args.checkpoint_activations): ddp_find_unused_parameters = True status, result, best_strategy = auto_accelerate( self.model, optim_func, self.train_dataset, dataloader_args=self.train_dataloader_args, loss_func=self.my_loss_func, prepare_input=prepare_input, optim_args={ "lr": self.atorch_args.lr, "weight_decay": self.atorch_args.weight_decay, "eps": self.atorch_args.adam_eps, "betas": (self.atorch_args.adam_beta1, self.atorch_args.adam_beta2), }, optim_param_func=partial( optim_param_func, args=self.atorch_args), load_strategy=strategy, ignore_dryrun_on_load_strategy=True, find_unused_parameters=ddp_find_unused_parameters, ) assert ( status ), f"auto_accelerate failed. 
status: {status}, result: {result}, best_strategy: {best_strategy}" print(f"Best strategy is: {best_strategy}") self.model = result.model self.optimizer = result.optim print(f'optimizer after fsdp: {self.optimizer}') self.loss_func = result.loss_func self.train_dataloader = result.dataloader self.prepare_input = result.prepare_input if self.args.resume_from_checkpoint == 'true': self.load_atorch_optim_state(optim_state_dict) if self.args.weighted_loss_mode.startswith('famo_valid'): self.load_famo_state() print(f"atorch use optimizer: {self.optimizer}") if self.is_rank0(): print(f'GPU mem after fsdp:') print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) def evaluate(self): logger.info(f"Start evaluation") if self.is_rank0(): print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) print(f'valid dataset length is: {len(self.valid_dataset)}') print(f'valid dataloader length is: {len(self.valid_dataloader)}') print(f'per device batch size: {self.args.per_device_valid_batch_size}') progress_bar = tqdm(range(len(self.valid_dataloader)), disable=not is_local_main_process(), smoothing=0) self.model.eval() losses = [] accumulated_task_loss_np = np.zeros(len(self.ID2TASK)) accumulated_task_num_np = np.zeros(len(self.ID2TASK)) accumulated_step = 0 for step, batch in enumerate(self.valid_dataloader): # if step >= self.args.valid_iters: if step >= self.args.valid_iters and (self.args.total_model_param >= 1e9 or self.args.train_mode == 'sst'): break with torch.no_grad(): # batch = {k: v.to(self.device) for k, v in batch.items()} # batch = self.prepare_input(batch, self.device) # outputs = self.model(**batch) outputs = self.model( input_ids=batch['input_ids'].to(self.device), attention_mask=batch['attention_mask'].to(self.device), position_ids=batch['position_ids'].to(self.device) ) # loss = outputs["loss"] loss, task_loss, task_num, _, _ = self.loss_func(outputs, batch, self.args.weighted_loss_mode) repeated_loss = loss.repeat( self.args.per_device_valid_batch_size) if repeated_loss.ndim == 0: repeated_loss = repeated_loss.clone()[None] output_tensors = [repeated_loss.clone() for _ in range(atorch.world_size())] torch.distributed.all_gather(output_tensors, repeated_loss) for tensor in output_tensors: if torch.isnan(tensor).any() or torch.isinf(tensor).any(): accumulated_step -= 1 continue losses.append(torch.cat(output_tensors, dim=0).cpu()) task_loss = task_loss.cpu().numpy() task_num = task_num.cpu().numpy() accumulated_task_loss_np += task_loss accumulated_task_num_np += task_num accumulated_step += 1 progress_bar.update(1) losses = torch.cat(losses) losses = losses[: len(self.valid_dataset)] mean_loss = torch.mean(losses).item() accumulated_task_loss = torch.tensor(accumulated_task_loss_np).to(self.device) accumulated_task_num = torch.tensor(accumulated_task_num_np).to(self.device) torch.distributed.all_reduce(accumulated_task_loss, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(accumulated_task_num, op=torch.distributed.ReduceOp.SUM) accumulated_task_loss /= torch.distributed.get_world_size() valid_task_loss = accumulated_task_loss / (accumulated_step - 1) logs = {'valid_loss': mean_loss} per_task_valid_loss = {self.ID2TASK[i]+'_loss': valid_task_loss[i].item() for i in range(len(self.ID2TASK))} logs.update(per_task_valid_loss) if is_global_main_process(): logger.info('log point') for i in range(len(self.ID2TASK)): if accumulated_task_num[i] != 0: logger.info(f"{self.ID2TASK[i]}_loss: {valid_task_loss[i]}, sample nums: {accumulated_task_num[i]}") 
self.log(logs, step=self.global_steps, phase='Evaluation') metrics = {'valid_loss': mean_loss, 'valid_task_loss': valid_task_loss} logger.info(f"Finish evaluation") if self.is_rank0(): print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) return metrics def log(self, logs, step, phase='Train'): if not self.summary_writer: return logger.info(json.dumps(logs)) for key, value in logs.items(): self.summary_writer.add_scalar(f'{phase}/{key}', value, step) def _sorted_checkpoints( self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='([0-9]+)', use_mtime=False ): ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob( f"{checkpoint_prefix}-*") if os.path.isdir(x)] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append( (os.path.getmtime(path), path)) else: regex_match = re.search( f".*{checkpoint_prefix}-({checkpoint_name_pattern})", path) if regex_match is not None and regex_match.groups() is not None: ordering_and_checkpoint_path.append( (int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] # Make sure we don't delete the best model. if self.best_model_checkpoint is not None: best_model_index = checkpoints_sorted.index(str(Path(self.best_model_checkpoint))) # for i in range(best_model_index, len(checkpoints_sorted) - 2): for i in range(best_model_index, len(checkpoints_sorted) - 1): checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i] print_rank_0(f'checkpoints sorted list: {checkpoints_sorted}') return checkpoints_sorted def _rotate_checkpoints( self, use_mtime=False, output_dir=None, prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='.*') -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints( use_mtime=use_mtime, output_dir=output_dir, checkpoint_prefix=prefix, checkpoint_name_pattern=checkpoint_name_pattern) if len(checkpoints_sorted) <= self.args.save_total_limit: return # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. save_total_limit = self.args.save_total_limit number_of_checkpoints_to_delete = max( 0, len(checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint, ignore_errors=True) def _clean_atorch_checkpoints(self, output_dir=None, prefix=PREFIX_CHECKPOINT_DIR): # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints( output_dir=output_dir, checkpoint_prefix=prefix, checkpoint_name_pattern='([0-9]+)') # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. 
for checkpoint in checkpoints_sorted[:-1]: logger.info( f"Deleting older atorch checkpoint [{checkpoint}] due to self.args.save_total_limit") try: os.remove(os.path.join(checkpoint, ATORCH_CHECKPOINT_NAME)) except Exception: continue def _save_peft_model(self, output_dir, state_dict=None): logger.info(f"Start saving peft model to {output_dir}") output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) model = unwrap_model(self.model)
if isinstance(model, PeftModel):
10
2023-11-02 01:37:01+00:00
16k
bytedance/cryostar
projects/star/train_density.py
[ { "identifier": "StarfileDataSet", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDataSet(Dataset):\n\n def __init__(self, cfg: StarfileDatasetConfig):\n super().__init__()\n self.cfg = cfg\n self.df = starfile.read(Path(cfg.starfile_path))\n\n if \"optics\" in self.df:\n optics_df = self.df[\"optics\"]\n particles_df = self.df[\"particles\"]\n else:\n optics_df = None\n particles_df = self.df\n self.particles_df = particles_df\n\n if cfg.apix is None:\n if optics_df is not None and \"rlnImagePixelSize\" in optics_df:\n self.apix = float(optics_df[\"rlnImagePixelSize\"][0])\n print(f\"Infer dataset apix={self.apix} from first optic group.\")\n elif \"rlnDetectorPixelSize\" in particles_df and \"rlnMagnification\" in particles_df:\n self.apix = float(particles_df[\"rlnDetectorPixelSize\"][0] / particles_df[\"rlnMagnification\"][0] * 1e4)\n print(f\"Infer dataset apix={self.apix} from first particle meta data.\")\n else:\n raise AttributeError(\"Cannot parse apix from starfile, please set it in config by hand.\")\n else:\n self.apix = cfg.apix\n\n if cfg.side_shape is None:\n tmp_mrc_path = osp.join(cfg.dataset_dir, particles_df[\"rlnImageName\"][0].split('@')[-1])\n with mrcfile.mmap(tmp_mrc_path, mode=\"r\", permissive=True) as m:\n self.side_shape = m.data.shape[-1]\n print(f\"Infer dataset side_shape={self.side_shape} from the 1st particle.\")\n else:\n self.side_shape = cfg.side_shape\n\n self.num_proj = len(particles_df)\n\n self.down_side_shape = self.side_shape\n if cfg.down_side_shape is not None:\n self.down_side_shape = cfg.down_side_shape\n\n if cfg.mask_rad is not None:\n self.mask = Mask(self.down_side_shape, cfg.mask_rad)\n\n self.f_mu = None\n self.f_std = None\n\n def __len__(self):\n return self.num_proj\n\n def estimate_normalization(self):\n if self.f_mu is None and self.f_std is None:\n f_sub_data = []\n # I have checked that the standard deviation of 10/100/1000 particles is similar\n for i in range(0, len(self), len(self) // 100):\n f_sub_data.append(self[i][\"fproj\"])\n f_sub_data = torch.cat(f_sub_data, dim=0)\n # self.f_mu = torch.mean(f_sub_data)\n self.f_mu = 0.0 # just follow cryodrgn\n self.f_std = torch.std(f_sub_data).item()\n else:\n raise Exception(\"The normalization factor has been estimated!\")\n\n def __getitem__(self, idx):\n item_row = self.particles_df.iloc[idx]\n try:\n img_name_raw = item_row[\"rlnImageName\"]\n in_mrc_idx, img_name = item_row[\"rlnImageName\"].split(\"@\")\n in_mrc_idx = int(in_mrc_idx) - 1\n mrc_path = osp.join(self.cfg.dataset_dir, img_name)\n with mrcfile.mmap(mrc_path, mode=\"r\", permissive=True) as mrc:\n if mrc.data.ndim > 2:\n proj = torch.from_numpy(np.array(mrc.data[in_mrc_idx])).float() * self.cfg.scale_images\n else:\n # the mrcs file can contain only one particle\n proj = torch.from_numpy(np.array(mrc.data)).float() * self.cfg.scale_images\n\n # get (1, side_shape, side_shape) proj\n if len(proj.shape) == 2:\n proj = proj[None, :, :] # add a dummy channel (for consistency w/ img fmt)\n else:\n assert len(proj.shape) == 3 and proj.shape[0] == 1 # some starfile already have a dummy channel\n\n # down-sample\n if self.down_side_shape != self.side_shape:\n if self.cfg.down_method == \"interp\":\n proj = tvf.resize(proj, [self.down_side_shape, ] * 2, antialias=True)\n elif self.cfg.down_method == \"fft\":\n proj = downsample_2d(proj[0, :, :], self.down_side_shape)[None, :, :]\n else:\n raise NotImplementedError\n\n if self.cfg.mask_rad is not None:\n proj = self.mask(proj)\n\n except Exception as e:\n 
print(f\"WARNING: Particle image {img_name_raw} invalid! Setting to zeros.\")\n print(e)\n proj = torch.zeros(1, self.down_side_shape, self.down_side_shape)\n\n if self.cfg.power_images != 1.0:\n proj *= self.cfg.power_images\n\n # Generate CTF from CTF paramaters\n defocusU = torch.from_numpy(np.array(item_row[\"rlnDefocusU\"] / 1e4, ndmin=2)).float()\n defocusV = torch.from_numpy(np.array(item_row[\"rlnDefocusV\"] / 1e4, ndmin=2)).float()\n angleAstigmatism = torch.from_numpy(np.radians(np.array(item_row[\"rlnDefocusAngle\"], ndmin=2))).float()\n\n # Read \"GT\" orientations\n if self.cfg.ignore_rots:\n rotmat = torch.eye(3).float()\n else:\n # yapf: disable\n rotmat = torch.from_numpy(euler_angles2matrix(\n np.radians(-item_row[\"rlnAngleRot\"]),\n # np.radians(particle[\"rlnAngleTilt\"]) * (-1 if self.cfg.invert_hand else 1),\n np.radians(-item_row[\"rlnAngleTilt\"]),\n np.radians(-item_row[\"rlnAnglePsi\"]))\n ).float()\n # yapf: enable\n\n # Read \"GT\" shifts\n if self.cfg.ignore_trans:\n shiftX = torch.tensor([0.])\n shiftY = torch.tensor([0.])\n else:\n # support early starfile formats\n # Particle translations used to be in pixels (rlnOriginX and rlnOriginY) but this changed to Angstroms\n # (rlnOriginXAngstrom and rlnOriginYAngstrom) in relion 3.1.\n # https://relion.readthedocs.io/en/release-3.1/Reference/Conventions.html\n if \"rlnOriginXAngst\" in item_row:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginXAngst\"], dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginYAngst\"], dtype=np.float32))\n else:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginX\"] * self.apix, dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginY\"] * self.apix, dtype=np.float32))\n\n fproj = primal_to_fourier_2d(proj)\n\n if self.f_mu is not None:\n fproj = (fproj - self.f_mu) / self.f_std\n proj = fourier_to_primal_2d(fproj).real\n\n in_dict = {\n \"proj\": proj,\n \"rotmat\": rotmat,\n \"defocusU\": defocusU,\n \"defocusV\": defocusV,\n \"shiftX\": shiftX,\n \"shiftY\": shiftY,\n \"angleAstigmatism\": angleAstigmatism,\n \"idx\": torch.tensor(idx, dtype=torch.long),\n \"fproj\": fproj,\n \"imgname_raw\": img_name_raw\n }\n\n if \"rlnClassNumber\" in item_row:\n in_dict[\"class_id\"] = item_row[\"rlnClassNumber\"]\n\n return in_dict" }, { "identifier": "StarfileDatasetConfig", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDatasetConfig:\n dataset_dir: str\n starfile_path: str\n # if is not specified, the following apix, and side_shape will be inferred from starfile\n apix: float = None\n side_shape: int = None\n # down-sample the original image or not\n down_side_shape: int = None\n down_method: str = \"interp\"\n # apply a circular mask on input image or not\n mask_rad: float = None\n # change image values\n scale_images: float = 1.0\n power_images: float = field(\n default=1.0,\n metadata={\"help\": \"Change the power of the signal by multiplying a constant number.\"})\n # ignore pose from starfile or not\n ignore_trans: bool = False\n ignore_rots: bool = False\n # invert_hand: bool = field(\n # default=False,\n # metadata={\"help\": \"Invert handedness when reading relion data.\"})" }, { "identifier": "ImplicitFourierVolume", "path": "cryostar/nerf/volume_utils.py", "snippet": "class ImplicitFourierVolume(nn.Module):\n\n def __init__(self, z_dim, img_sz, mask_rad, params_implicit):\n \"\"\"\n Initialization of an implicit representation of the volume in Fourier space.\n\n Parameters\n ----------\n img_sz: int\n 
params_implicit: dictionary\n \"\"\"\n super().__init__()\n self.img_sz = img_sz\n self.z_dim = z_dim\n\n lincoords = torch.linspace(-1., 1., self.img_sz)\n [X, Y] = torch.meshgrid([lincoords, lincoords], indexing=\"ij\")\n coords = torch.stack([Y, X, torch.zeros_like(X)], dim=-1)\n coords = shift_coords(coords, 1., 1., 0, img_sz, img_sz, 1)\n self.register_buffer('plane_coords', coords.reshape(-1, 3))\n\n self.mask_rad = mask_rad\n if self.mask_rad != 1:\n mask = create_circular_mask(img_sz, img_sz, None, self.mask_rad / 2 * img_sz)\n plane_window_mask = torch.from_numpy(mask).reshape(-1)\n self.register_buffer('plane_window_mask', plane_window_mask)\n sphere_mask = torch.from_numpy(\n create_sphere_mask(self.img_sz, self.img_sz, self.img_sz, radius=self.mask_rad / 2 * self.img_sz)\n )\n self.register_buffer(\"sphere_mask\", sphere_mask)\n\n lincoords = torch.linspace(-1., 1., self.img_sz)\n [X, Y, Z] = torch.meshgrid([lincoords, lincoords, lincoords], indexing=\"ij\")\n coords = torch.stack([Z, Y, X], dim=-1)\n coords = shift_coords(coords, 1., 1., 1., img_sz, img_sz, img_sz)\n self.register_buffer('coords_3d', coords.reshape(-1, 3))\n\n self.fvol = FourierNet(net_type=params_implicit[\"net_type\"],\n z_dim=z_dim,\n pe_dim=params_implicit[\"pe_dim\"],\n pe_type=params_implicit[\"pe_type\"],\n D=params_implicit[\"D\"],\n hidden_dim=params_implicit[\"hidden\"],\n force_symmetry=params_implicit['force_symmetry'])\n\n def forward(self, z, rotmat):\n \"\"\"\n Generates a slice in Fourier space from a rotation matrix.\n\n Parameters\n ----------\n rotmat: torch.Tensor (B, 3, 3)\n\n Returns\n -------\n fplane: torch.Tensor (B, 1, img_sz, img_sz) (complex)\n \"\"\"\n if self.z_dim == 0:\n assert z is None\n batch_sz = rotmat.shape[0]\n\n with torch.autocast(\"cuda\", enabled=False):\n assert self.plane_coords.dtype == torch.float32\n assert rotmat.dtype == torch.float32\n rot_plane_coords = torch.bmm(self.plane_coords.repeat(batch_sz, 1, 1), rotmat) # B, img_sz^2, 3\n\n if self.mask_rad != 1:\n coords_mask = einops.repeat(self.plane_window_mask, \"num_coords -> bsz num_coords c3\", bsz=batch_sz, c3=3)\n rot_plane_coords = rot_plane_coords[coords_mask].reshape(batch_sz, -1, 3) # B, mask_num, 3\n\n fplane = self.fvol(z, rot_plane_coords) # B, _, 1/2\n\n if self.mask_rad != 1:\n unmask_fplane = fplane.new_zeros(batch_sz, self.img_sz * self.img_sz, self.fvol.out_features)\n value_mask = einops.repeat(self.plane_window_mask, \"num_coords -> bsz num_coords c\", bsz=batch_sz, c=self.fvol.out_features)\n unmask_fplane[value_mask] = fplane.reshape(-1)\n fplane = unmask_fplane.reshape(batch_sz, self.img_sz, self.img_sz, self.fvol.out_features)\n else:\n fplane = fplane.reshape(batch_sz, self.img_sz, self.img_sz, self.fvol.out_features)\n\n if self.fvol.out_features == 2:\n fplane = torch.view_as_complex(fplane) # B, img_sz, img_sz\n else:\n fplane = batch_hartley_to_fourier_2d(fplane.squeeze(-1)) # B, img_sz, img_sz\n\n fplane = fplane[:, None, :, :]\n return fplane\n\n def make_volume(self, z):\n with torch.no_grad():\n with torch.autocast(\"cuda\", enabled=False):\n coords = self.coords_3d.unsqueeze(0)\n num_coords = coords.shape[1]\n chunk_size = 128**2 * 32\n exp_fvol = []\n for sid in range(0, num_coords, chunk_size):\n eid = sid + chunk_size\n exp_fvol.append(self.fvol(z, coords[:, sid:eid]))\n exp_fvol = torch.cat(exp_fvol, dim=1)\n if self.fvol.out_features == 2:\n exp_fvol = exp_fvol.reshape(self.img_sz, self.img_sz, self.img_sz, 2)\n exp_fvol = torch.view_as_complex(exp_fvol)\n else:\n exp_fvol 
= exp_fvol.reshape(self.img_sz, self.img_sz, self.img_sz)\n exp_fvol = hartley_to_fourier_3d(exp_fvol)\n\n exp_fvol[~self.sphere_mask] = 0.0\n exp_vol = fourier_to_primal_3d(exp_fvol).real\n return exp_vol" }, { "identifier": "SpatialGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class SpatialGridTranslate(torch.nn.Module):\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgrid([\n torch.linspace(-1.0, 1.0, self.D, device=device),\n torch.linspace(-1.0, 1.0, self.D, device=device)],\n indexing=\"ij\"), dim=-1).reshape(-1, 2)\n # yapf: enable\n self.register_buffer(\"coords\", coords)\n\n def transform(self, images: torch.Tensor, trans: torch.Tensor):\n \"\"\"\n The `images` are stored in `YX` mode, so the `trans` is also `YX`!\n\n Supposing that D is 96, a point is at 0.0:\n - adding 48 should move it to the right corner which is 1.0\n 1.0 = 0.0 + 48 / (96 / 2)\n - adding 96(>48) should leave it at 0.0\n 0.0 = 0.0 + 96 / (96 / 2) - 2.0\n - adding -96(<48) should leave it at 0.0\n 0.0 = 0.0 - 96 / (96 / 2) + 2.0\n\n Input:\n images: (B, NY, NX)\n trans: (B, T, 2)\n\n Returns:\n images: (B, T, NY, NX)\n \"\"\"\n B, NY, NX = images.shape\n assert self.D == NY == NX\n assert images.shape[0] == trans.shape[0]\n\n grid = einops.rearrange(self.coords, \"N C2 -> 1 1 N C2\") - \\\n einops.rearrange(trans, \"B T C2 -> B T 1 C2\") * 2 / self.D\n grid = grid.flip(-1) # convert the first axis from slow-axis to fast-axis\n grid[grid >= 1] -= 2\n grid[grid <= -1] += 2\n grid.clamp_(-1.0, 1.0)\n\n sampled = F.grid_sample(einops.rearrange(images, \"B NY NX -> B 1 NY NX\"), grid, align_corners=True)\n\n sampled = einops.rearrange(sampled, \"B 1 T (NY NX) -> B T NY NX\", NX=NX, NY=NY)\n return sampled" }, { "identifier": "FourierGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class FourierGridTranslate(torch.nn.Module):\n \"\"\"\n DFT's translation is:\n `f(x - x0, y - y0) <=> F(u, v) exp(-2 j \\pi (x0 u + y0 v) / N )`\n where `x, y, u, v` all have a range of `N`, so `(x0 u + y0 v) / N \\in (0, N)`\n\n Here we initialize the `u, v` coordinates between `(-0.5, 0.5)` so that the \n range is 1, where the `1/N` term can be ignored.\n\n See also: https://dsp.stackexchange.com/questions/40228/translation-property-of-2-d-discrete-fourier-transform\n\n Important notes:\n If `N=4`, the coordinates u will be `[-0.5, -0.17, 0.17, 0.5]`, but the \n `fft`ed image's frequency is `[-0.50, -0.25, 0.00, 0.25]`, so we have to \n add some corrections:\n - right-shift `u` to be `[-0.50, -0.25, 0.00, 0.25]`\n - perform multiplication\n\n \"\"\"\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgrid([\n torch.linspace(-1.0, 1.0, self.D, device=device),\n torch.linspace(-1.0, 1.0, self.D, device=device)],\n indexing=\"ij\"), dim=-1).reshape(-1, 2) / 2\n # yapf: enable\n coords = shift_coords(coords, 0.5, 0.5, None, self.D, self.D, None, False)\n self.register_buffer(\"coords\", coords)\n\n def transform(self, images: torch.Tensor, trans: torch.Tensor):\n \"\"\"\n The `images` are stored in `YX` mode, so the `trans` is also `YX`!\n\n Input:\n images: (B, NY, NX)\n trans: (B, T, 2)\n\n Returns:\n images: (B, T, NY, NX)\n \"\"\"\n B, NY, NX = images.shape\n assert self.D == NY == NX\n assert images.shape[0] == trans.shape[0]\n images = einops.rearrange(images, \"B NY NX -> B 1 (NY NX)\")\n delta = trans @ self.coords.t() * -2j * 
torch.pi\n images_trans = torch.exp(delta) * images\n images_trans = einops.rearrange(images_trans, \"B T (NY NX) -> B T NY NX\", NY=self.D, NX=self.D)\n return images_trans" }, { "identifier": "CTFRelion", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFRelion(CTFBase):\n \"\"\"\n BUG: There are two bugs in this file:\n 1. `self.angleFrequency` has some error for even-sized grid.\n 2. `local_defocus` in `get_ctf()` has some error, `angleAstigmatism` should be\n replaced with `defocusU - defocusV`.\n\n The bugs will not affect real-world data too much. But you may encounter some issues\n on simulated datasets. Use CTFCryoDRGN instead.\n \"\"\"\n\n def __init__(self,\n size=257,\n resolution=0.8,\n kV=300.0,\n valueNyquist=1.,\n defocusU=1.,\n defocusV=1.,\n angleAstigmatism=0.,\n cs=2.7,\n phasePlate=0.,\n amplitudeContrast=.1,\n bFactor=0.,\n num_particles=500,\n requires_grad=False,\n precompute=False,\n flip_images=False):\n super(CTFRelion, self).__init__(resolution, num_particles, requires_grad)\n self.requires_grad = requires_grad\n self.flip_images = flip_images\n\n self.size = size # in pixel\n self.resolution = resolution # in angstrom\n self.kV = kV # in kilovolt\n\n self.valueNyquist = valueNyquist\n self.phasePlate = phasePlate / 180. * np.pi # in radians (converted from degrees)\n self.amplitudeContrast = amplitudeContrast\n self.bFactor = bFactor\n\n self.frequency = 1. / self.resolution\n\n self.wavelength = self._get_ewavelength(self.kV * 1e3) # input in V (so we convert kv*1e3)\n\n angleAstigmatism = angleAstigmatism / 180. * np.pi # input in degree converted in radian\n cs = cs * 1e7 # input in mm converted in angstrom\n # the angleAstigmatism, defocusU, defocusV and cs are nn.Parameter of size (N, 1, 1)\n self.angleAstigmatism = nn.Parameter(angleAstigmatism * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.cs = nn.Parameter(cs * torch.ones((num_particles, 1, 1), dtype=torch.float32), requires_grad=requires_grad)\n self.defocusU = nn.Parameter(defocusU * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.defocusV = nn.Parameter(defocusV * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n\n self.precomputed_filters = precompute\n\n ax = torch.linspace(-1. / (2. * resolution), 1 / (2. * resolution), self.size)\n mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n self.register_buffer(\"r2\", mx**2 + my**2)\n self.register_buffer(\"r\", torch.sqrt(self.r2))\n self.register_buffer(\"angleFrequency\", torch.atan2(my, mx))\n\n if not self.requires_grad and self.precomputed_filters:\n print(\"Precomputing hFourier in CTF\")\n self.register_buffer('hFourier', self.get_ctf(torch.arange(num_particles), num_particles))\n\n def _get_ewavelength(self, U):\n # assumes V as input, returns wavelength in angstrom\n h = scipy.constants.h\n e = scipy.constants.e\n c = scipy.constants.c\n m0 = scipy.constants.m_e\n\n return h / math.sqrt(2. * m0 * e * U) / math.sqrt(1 + e * U / (2 * m0 * c**2)) * 1e10\n\n def get_ctf(self, idcs, B, cpu_params={}, frequency_marcher=None):\n defocusU = self.defocusU[idcs, :, :]\n defocusV = self.defocusV[idcs, :, :]\n angleAstigmatism = self.angleAstigmatism[idcs, :, :]\n cs = self.cs[idcs, :, :]\n\n ac = self.amplitudeContrast\n pc = math.sqrt(1. - ac**2)\n K1 = np.pi / 2. 
* cs * self.wavelength**3\n K2 = np.pi * self.wavelength\n\n # Cut-off from frequency marcher\n if frequency_marcher is not None:\n self.size_after_fm = 2 * frequency_marcher.f + 1\n if self.size_after_fm > self.size:\n self.size_after_fm = self.size\n angleFrequency = frequency_marcher.cut_coords_plane(self.angleFrequency.reshape(\n self.size, self.size, 1)).reshape(self.size_after_fm, self.size_after_fm)\n r2 = frequency_marcher.cut_coords_plane(self.r2.reshape(self.size, self.size,\n 1)).reshape(self.size_after_fm, self.size_after_fm)\n else:\n self.size_after_fm = self.size\n angleFrequency = self.angleFrequency\n r2 = self.r2\n\n angle = angleFrequency - angleAstigmatism\n local_defocus = 1e4 * (defocusU + defocusV) / 2. + angleAstigmatism * torch.cos(2. * angle)\n\n gamma = K1 * r2**2 - K2 * r2 * local_defocus - self.phasePlate\n hFourier = -pc * torch.sin(gamma) + ac * torch.cos(gamma)\n\n if self.valueNyquist != 1:\n decay = np.sqrt(-np.log(self.valueNyquist)) * 2. * self.resolution\n envelope = torch.exp(-self.frequency * decay**2 * r2)\n hFourier *= envelope\n\n return hFourier\n\n def oversample_multiply_crop(self, x_fourier, hFourier):\n # we assume that the shape of the CTF is always going to be bigger\n # than the size of the input image\n input_sz = x_fourier.shape[-1]\n if input_sz != self.size_after_fm:\n x_primal = fourier_to_primal_2d(x_fourier)\n\n pad_len = (self.size_after_fm - x_fourier.shape[-1]) // 2 # here we assume even lengths\n p2d = (pad_len, pad_len, pad_len, pad_len)\n x_primal_padded = F.pad(x_primal, p2d, 'constant', 0)\n\n x_fourier_padded = primal_to_fourier_2d(x_primal_padded)\n\n x_fourier_padded_filtered = x_fourier_padded * hFourier[:, None, :, :]\n return x_fourier_padded_filtered[..., pad_len:-pad_len, pad_len:-pad_len]\n else:\n return x_fourier * hFourier[:, None, :, :]\n\n def get_cpu_params(self, idcs, ctf_params, flip=False):\n batch_size = idcs.shape[0]\n self.defocusU[idcs, :, :] = ctf_params['defocusU'][:batch_size] if not flip else\\\n ctf_params['defocusU'][batch_size:]\n self.defocusV[idcs, :, :] = ctf_params['defocusV'][:batch_size] if not flip else\\\n ctf_params['defocusV'][batch_size:]\n self.angleAstigmatism[idcs, :, :] = ctf_params['angleAstigmatism'][:batch_size] if not flip else\\\n ctf_params['angleAstigmatism'][batch_size:]\n cpu_params = {}\n return cpu_params\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n # This is when we want to prescribe parameters for the CTF\n if x_fourier.dim() == 3:\n x_fourier = x_fourier[None, ...]\n # x_fourier: B, 1, S, S\n batch_size = len(idcs)\n cpu_params = {}\n if ctf_params:\n cpu_params = self.get_cpu_params(idcs, ctf_params, flip=False)\n\n # if new params for the CTF have been prescribed or we are optimizing it\n # then request the evaluation of the CTF\n if not ctf_params and self.precomputed_filters and not self.requires_grad:\n hFourier = self.hFourier[idcs, :, :]\n else:\n hFourier = self.get_ctf(idcs, batch_size, cpu_params=cpu_params, frequency_marcher=frequency_marcher)\n\n if self.flip_images:\n flipped_hFourier = torch.flip(hFourier, [1, 2])\n\n hFourier = torch.cat([hFourier, flipped_hFourier], dim=0)\n\n return self.oversample_multiply_crop(x_fourier, hFourier)" }, { "identifier": "CTFCryoDRGN", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFCryoDRGN(CTFBase):\n\n def __init__(self,\n size,\n resolution,\n num_particles=None,\n kV=300,\n cs=2.0,\n amplitudeContrast=0.1,\n requires_grad=False):\n super(CTFBase, 
self).__init__()\n self.size = size\n self.resolution = resolution\n self.requires_grad = requires_grad\n self.kV = kV\n self.cs = cs\n self.ac = amplitudeContrast\n # ax = torch.linspace(-1. / (2. * resolution), 1 / (2. * resolution), self.size)\n # mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n ax = torch.fft.fftshift(torch.fft.fftfreq(self.size, self.resolution))\n mx, my = torch.meshgrid(ax, ax, indexing=\"xy\")\n freqs = torch.stack([mx.flatten(), my.flatten()], 1)\n self.register_buffer(\"freqs\", freqs)\n\n def get_ctf(self, ctf_params={}):\n bsz = len(ctf_params[\"defocusU\"])\n device = self.freqs.device\n hFourier = compute_ctf(freqs=self.freqs.repeat(bsz, 1, 1),\n dfu=(ctf_params[\"defocusU\"] * 1e4).squeeze(1),\n dfv=(ctf_params[\"defocusV\"] * 1e4).squeeze(1),\n dfang=torch.rad2deg(ctf_params[\"angleAstigmatism\"]).squeeze(1),\n volt=torch.tensor(self.kV, device=device).repeat(bsz, 1),\n cs=torch.tensor(self.cs, device=device).repeat(bsz, 1),\n w=torch.tensor(self.ac, device=device).repeat(bsz,\n 1)).reshape(bsz, self.size, self.size)\n return hFourier\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n hFourier = -self.get_ctf(ctf_params)\n return x_fourier * hFourier[:, None, :, :]" }, { "identifier": "fourier_to_primal_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "def fourier_to_primal_2d(f: torch.Tensor) -> torch.Tensor:\n f = torch.fft.ifftshift(f, dim=(-2, -1))\n return torch.fft.fftshift(torch.fft.ifftn(f, s=(f.shape[-2], f.shape[-1]), dim=(-2, -1)), dim=(-2, -1))" }, { "identifier": "primal_to_fourier_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "@torch.autocast(\"cuda\")\ndef primal_to_fourier_2d(r: torch.Tensor) -> torch.Tensor:\n with torch.autocast(\"cuda\", enabled=False):\n r = torch.fft.ifftshift(r.float(), dim=(-2, -1))\n f = torch.fft.fftshift(torch.fft.fftn(r, s=(r.shape[-2], r.shape[-1]), dim=(-2, -1)), dim=(-2, -1))\n return f" }, { "identifier": "sample_along_pca", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def sample_along_pca(z: np.ndarray, pca_dim=1, num=5) -> np.ndarray:\n assert isinstance(z, np.ndarray)\n pc, pca = run_pca(z)\n start = np.percentile(pc[:, pca_dim - 1], 5)\n stop = np.percentile(pc[:, pca_dim - 1], 95)\n z_pc_traj = get_pc_traj(pca, z.shape[1], num, pca_dim, start, stop)\n point, point_id = get_nearest_point(z, z_pc_traj)\n return point, point_id" }, { "identifier": "get_nearest_point", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def get_nearest_point(data: np.ndarray, query: np.ndarray) -> Tuple[npt.NDArray[np.float32], np.ndarray]:\n \"\"\"\n Find closest point in @data to @query\n Return datapoint, index\n \"\"\"\n ind = cdist(query, data).argmin(axis=1)\n return data[ind], ind" }, { "identifier": "cluster_kmeans", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def cluster_kmeans(z: np.ndarray, K: int, on_data: bool = True, reorder: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Cluster z by K means clustering\n Returns cluster labels, cluster centers\n If reorder=True, reorders clusters according to agglomerative clustering of cluster centers\n \"\"\"\n kmeans = KMeans(n_clusters=K, n_init=10, random_state=0, max_iter=10)\n labels = kmeans.fit_predict(z)\n centers = kmeans.cluster_centers_\n\n centers_ind = None\n if on_data:\n centers, centers_ind = get_nearest_point(z, centers)\n\n if reorder:\n # BUG from seaborn or scipy:\n # sns.clustermap only supports data with at least 2 dim\n if z.shape[1] == 1:\n 
centers = np.hstack([centers, np.zeros_like(centers)])\n g = sns.clustermap(centers)\n reordered = g.dendrogram_row.reordered_ind\n centers = centers[reordered]\n if centers_ind is not None:\n centers_ind = centers_ind[reordered]\n tmp = {k: i for i, k in enumerate(reordered)}\n labels = np.array([tmp[k] for k in labels])\n if z.shape[1] == 1:\n centers = centers[:, :1]\n return labels, centers" }, { "identifier": "pl_init_exp", "path": "cryostar/utils/misc.py", "snippet": "def set_seed(seed: int = 42):\ndef chain(arg, *funcs):\ndef convert_to_numpy(*args):\ndef CHECK_SHAPE(tensor, expected_shape):\ndef ASSERT_SHAPE(tensor, expected_shape):\ndef parse_mmengine_args(override_mode=\"default\"):\ndef flatten_nested_dict(nested: Union[dict, Config]) -> dict:\ndef warmup(warmup_step, lower=0.0, upper=1.0):\n def run(cur_step):\ndef init_mmengine_config(args):\ndef init_mmengine_exp(args,\n exp_prefix='',\n backup_list=None,\n inplace=True,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\",\n tensorboard=False):\ndef _get_next_version(root_dir, dir_name_prefix):\ndef pl_init_exp(override_mode=\"default\",\n exp_prefix='',\n backup_list=None,\n inplace=False,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\"):\ndef save_pdb(CAs, path, ref_pdb_path):\ndef load_CAs_from_pdb(file):\ndef load_NCaC_from_pdb(file):\ndef load_chain_A(pdb_path):\ndef points_to_pdb(path_to_save, points: np.ndarray):\ndef point_stack_to_pdb(path_to_save, point_stack: np.ndarray):\ndef find_rigid_alignment(A, B):\ndef batch_find_rigid_alignment(A, B):\ndef pretty_dict(x, precision=3):\ndef create_sphere_mask(d, h, w, center=None, radius=None) -> np.ndarray:\ndef create_circular_mask(h, w, center=None, radius=None) -> np.ndarray:\n H = A_c.T.mm(B_c)\n U, S, V = torch.svd(H)\n R = V.mm(U.T)\n H = einops.einsum(A_c, B_c, \"b n c1, b n c2 -> b c1 c2\")\n V = VmT.mT\n R = einops.einsum(V, U.transpose(2, 1), \"b c1 c2, b c2 c3 -> b c1 c3\")" }, { "identifier": "calc_kl_loss", "path": "cryostar/utils/losses.py", "snippet": "def calc_kl_loss(mu, log_var, free_bits, reduction=\"mean\"):\n kld_loss = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp())\n # free bits\n kld_loss = torch.clamp(kld_loss, free_bits) # (bsz, z-dim)\n kld_loss = torch.mean(kld_loss, dim=1) # (bsz, )\n if reduction == \"mean\":\n kld_loss = torch.mean(kld_loss) # averaged over bsz x z-dim\n elif reduction == \"none\":\n kld_loss = kld_loss\n else:\n raise NotImplementedError\n return kld_loss" }, { "identifier": "VAEEncoder", "path": "cryostar/utils/ml_modules.py", "snippet": "class VAEEncoder(nn.Module):\n\n def __init__(self, in_dim: int, hidden_dim: Union[int, List[int]], out_dim: int, num_hidden_layers=3):\n super().__init__()\n self.in_dim = in_dim\n if isinstance(hidden_dim, int):\n self.hidden_dim = (hidden_dim, ) * num_hidden_layers\n elif isinstance(hidden_dim, (list, tuple)):\n assert len(hidden_dim) == num_hidden_layers\n self.hidden_dim = hidden_dim\n else:\n raise NotImplementedError\n self.out_dim = out_dim\n self.num_hidden_layers = num_hidden_layers\n\n self.input_layer = nn.Sequential(\n ResLinear(in_dim, self.hidden_dim[0]) if in_dim == self.hidden_dim[0] else Linear(\n in_dim, self.hidden_dim[0]), nn.ReLU(inplace=True))\n self.mlp = MLP(self.hidden_dim[:-1], self.hidden_dim[1:])\n\n self.mean_layer = Linear(self.hidden_dim[-1], out_dim)\n self.var_layer = Linear(self.hidden_dim[-1], out_dim)\n\n def forward(self, x):\n x = self.mlp(self.input_layer(x))\n mean = self.mean_layer(x)\n log_var = self.var_layer(x)\n return mean, 
log_var" }, { "identifier": "reparameterize", "path": "cryostar/utils/ml_modules.py", "snippet": "def reparameterize(mu, log_var):\n std = torch.exp(0.5 * log_var)\n eps = torch.randn_like(std)\n return mu + eps * std" }, { "identifier": "save_mrc", "path": "cryostar/utils/mrc_tools.py", "snippet": "def save_mrc(vol,\n path,\n voxel_size: Union[int, float, Tuple, np.recarray] = None,\n origin: Union[int, float, Tuple, np.recarray] = None):\n \"\"\"\n Save volumetric data to mrc file, set voxel_size, origin.\n See Also: https://mrcfile.readthedocs.io/en/stable/source/mrcfile.html#mrcfile.mrcobject.MrcObject.voxel_size\n Args:\n vol: density volume\n path: save path\n voxel_size: a single number, a 3-tuple (x, y ,z) or a modified version of the voxel_size array, default 1.\n origin: a single number, a 3-tuple (x, y ,z) or a modified version of the origin array, default 0.\n\n \"\"\"\n with mrcfile.new(path, overwrite=True) as m:\n m.set_data(vol)\n\n if voxel_size is not None:\n m.voxel_size = voxel_size\n\n if origin is not None:\n m.header.origin = origin" } ]
import os import os.path as osp import einops import lightning.pytorch as pl import numpy as np import torch from lightning.pytorch.strategies import DDPStrategy from lightning.pytorch.utilities import rank_zero_only from torch.utils.data import DataLoader from tqdm import tqdm from mmengine import mkdir_or_exist from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig from cryostar.nerf.volume_utils import ImplicitFourierVolume from cryostar.utils.transforms import SpatialGridTranslate, FourierGridTranslate from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN from cryostar.utils.fft_utils import (fourier_to_primal_2d, primal_to_fourier_2d) from cryostar.utils.latent_space_utils import sample_along_pca, get_nearest_point, cluster_kmeans from cryostar.utils.misc import (pl_init_exp, create_circular_mask, log_to_current, pretty_dict) from cryostar.utils.losses import calc_kl_loss from cryostar.utils.ml_modules import VAEEncoder, reparameterize from cryostar.utils.mrc_tools import save_mrc from miscs import infer_ctf_params_from_config
12,413
else: f_pred = self.vol(None, R) pred_ctf_params = {k: batch[k] for k in ('defocusU', 'defocusV', 'angleAstigmatism') if k in batch} f_pred = self.ctf(f_pred, batch['idx'], ctf_params=pred_ctf_params, mode="gt", frequency_marcher=None) if self.cfg.loss.loss_fn == "rmsf": pred = fourier_to_primal_2d(f_pred).real delta = pred - proj_out em_loss = delta.reshape(bsz, -1).square().mean() elif self.cfg.loss.loss_fn == "fmsf": f_proj = primal_to_fourier_2d(proj_out) delta = torch.view_as_real(f_proj - f_pred) delta = delta[einops.repeat(self.mask, "ny nx -> b 1 ny nx c", b=delta.shape[0], c=delta.shape[-1])] em_loss = delta.reshape(bsz, -1).square().mean() else: raise NotImplementedError loss = em_loss log_dict = {"em": em_loss} if self.z_dim != 0: log_dict["kld"] = kld_loss loss = loss + kld_loss if self.global_step % 100 == 0: log_to_current(f"epoch {self.current_epoch} [{batch_idx}/{self.trainer.num_training_batches}] | " + pretty_dict(log_dict, 5)) return loss def on_validation_start(self) -> None: self.evaluate() def validation_step(self, *args, **kwargs): pass def save_ckpt(self): if self.trainer.is_global_zero: save_dir = self._get_save_dir() torch.save(self.vol.state_dict(), os.path.join(save_dir, "ckpt.pt")) # self.history_saved_dirs.append(save_dir) # keep_last_k = 1 # if len(self.history_saved_dirs) >= keep_last_k: # for to_remove in self.history_saved_dirs[:-keep_last_k]: # p = Path(to_remove) / "ckpt.pt" # if p.exists(): # p.unlink() # log_to_current(f"delete {p} to keep last {keep_last_k} ckpts") def evaluate(self) -> None: pixel_size = self.cfg.data_process.down_apix valid_loader = DataLoader(dataset=self.dataset, batch_size=self.cfg.data_loader.val_batch_per_gpu, shuffle=False, drop_last=False, num_workers=12) if self.trainer.is_global_zero: save_dir = self._get_save_dir() self.save_ckpt() if self.z_dim != 0: if self.cfg.extra_input_data_attr.given_z is None: zs = [] for batch in tqdm(iter(valid_loader)): proj_in, proj_out = self.process_image(batch) f_proj_in = primal_to_fourier_2d(proj_in) if self.cfg.model.enc_space == "real": enc_input = einops.rearrange(proj_in, "b 1 ny nx -> b (1 ny nx)").to(self.device) else: enc_input = einops.rearrange(torch.view_as_real(f_proj_in), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2).to(self.device) mu, log_var = self.encoder(enc_input) zs.append(mu.detach().cpu()) # if self.cfg.trainer.devices == 1 and len(zs) > 20: # for _ in range(10): # log_to_current("WARNING!" 
+ "*" * _) # log_to_current( # "since only one device is used, we assume this is a debug mode, and do not go through all validsets" # ) # break zs = torch.cat(zs).cpu().numpy() else: zs = self.given_z.cpu().numpy() np.save(f"{save_dir}/z.npy", zs) kmeans_labels, centers = cluster_kmeans(zs, 10) centers, centers_ind = get_nearest_point(zs, centers) np.savetxt(f"{save_dir}/z_kmeans.txt", centers, fmt='%.5f') np.savetxt(f"{save_dir}/z_kmeans_ind.txt", centers_ind, fmt='%d') centers = torch.from_numpy(centers).to(self.device) for i in range(len(centers)): v = self.vol.make_volume(centers[i:i + 1]) save_mrc(v.cpu().numpy(), f"{save_dir}/vol_kmeans_{i:03}.mrc", pixel_size, -pixel_size * (v.shape[0] // 2)) for pca_dim in range(1, 1 + min(3, self.cfg.model.z_dim)): z_on_pca, z_on_pca_id = sample_along_pca(zs, pca_dim, 10) np.savetxt(f"{save_dir}/z_pca_{pca_dim}.txt", z_on_pca, fmt='%.5f') np.savetxt(f"{save_dir}/z_pca_ind_{pca_dim}.txt", z_on_pca_id, fmt='%d') z_on_pca = torch.from_numpy(z_on_pca).to(self.device) for i in range(len(z_on_pca)): v = self.vol.make_volume(z_on_pca[i:i + 1]) save_mrc(v.cpu().numpy(), f"{save_dir}/vol_pca_{pca_dim}_{i:03}.mrc", pixel_size, -pixel_size * (v.shape[0] // 2)) else: v = self.vol.make_volume(None) save_mrc(v.cpu().numpy(), f"{save_dir}/vol.mrc", pixel_size, -pixel_size * (v.shape[0] // 2)) def on_train_start(self) -> None: if self.trainer.is_global_zero: log_to_current(self) def configure_optimizers(self): return torch.optim.AdamW(self.parameters(), 0.0001) def train(): cfg = pl_init_exp(exp_prefix=TASK_NAME, backup_list=[ __file__, ], inplace=False) dataset = StarfileDataSet(
log_to_current = rank_zero_only(log_to_current) TASK_NAME = "density" class CryoModel(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() self.cfg = cfg self.dataset = dataset self.z_dim = cfg.model.z_dim self.history_saved_dirs = [] if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0: if cfg.model.enc_space == "real": self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) elif cfg.model.enc_space == "fourier": self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) else: raise NotImplementedError if cfg.model.shift_method == "interp": self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, ) log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.") elif cfg.model.shift_method == "fft": self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, ) else: raise NotImplementedError ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) self.vol = ImplicitFourierVolume( self.z_dim, self.cfg.data_process.down_side_shape, self.cfg.loss.mask_rad_for_image_loss, { "net_type": cfg.model.net_type, "pe_dim": self.cfg.data_process.down_side_shape, "D": self.cfg.data_process.down_side_shape, "pe_type": cfg.model.pe_type, "force_symmetry": False, "hidden": cfg.model.hidden, }) mask = create_circular_mask(self.cfg.data_process.down_side_shape, self.cfg.data_process.down_side_shape, None, self.cfg.data_process.down_side_shape // 2 * self.cfg.loss.mask_rad_for_image_loss,) self.register_buffer("mask", torch.from_numpy(mask)) if cfg.extra_input_data_attr.given_z is not None: self.register_buffer("given_z", torch.from_numpy(np.load(cfg.extra_input_data_attr.given_z))) if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") state_dict = torch.load(self.cfg.extra_input_data_attr.ckpt_path, map_location=self.device) self.vol.load_state_dict(state_dict) def _get_save_dir(self): save_dir = os.path.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def process_image(self, batch): R = batch["rotmat"] bsz = len(R) trans = torch.cat([ batch["shiftY"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix, batch["shiftX"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix ], dim=2) proj_in = batch["proj"].to(self.device) if self.cfg.model.shift_method == "interp": proj = self.translate.transform(proj_in.squeeze(1), trans.to(self.device)) elif self.cfg.model.shift_method == "fft": fproj = primal_to_fourier_2d(proj_in) fproj = self.f_translate.transform(fproj.squeeze(1), trans.to(self.device)) proj = fourier_to_primal_2d(fproj) if self.cfg.model.shift_data: return proj, proj else: return proj_in, proj def training_step(self, batch, batch_idx): R = batch["rotmat"] bsz = len(R) proj_in, proj_out = self.process_image(batch) f_proj_in = primal_to_fourier_2d(proj_in) if self.z_dim != 0: if self.cfg.extra_input_data_attr.given_z is not None: z = 
self.given_z[batch["idx"]].reshape(bsz, -1) kld_loss = 0.0 else: if self.cfg.model.enc_space == "fourier": enc_input = einops.rearrange(torch.view_as_real(f_proj_in), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) elif self.cfg.model.enc_space == "real": enc_input = einops.rearrange(proj_in, "b 1 ny nx -> b (1 ny nx)") mu, log_var = self.encoder(enc_input) z = reparameterize(mu, log_var) kld_loss = calc_kl_loss(mu, log_var, self.cfg.loss.free_bits) kld_loss = kld_loss / self.mask.sum() f_pred = self.vol(z, R) else: f_pred = self.vol(None, R) pred_ctf_params = {k: batch[k] for k in ('defocusU', 'defocusV', 'angleAstigmatism') if k in batch} f_pred = self.ctf(f_pred, batch['idx'], ctf_params=pred_ctf_params, mode="gt", frequency_marcher=None) if self.cfg.loss.loss_fn == "rmsf": pred = fourier_to_primal_2d(f_pred).real delta = pred - proj_out em_loss = delta.reshape(bsz, -1).square().mean() elif self.cfg.loss.loss_fn == "fmsf": f_proj = primal_to_fourier_2d(proj_out) delta = torch.view_as_real(f_proj - f_pred) delta = delta[einops.repeat(self.mask, "ny nx -> b 1 ny nx c", b=delta.shape[0], c=delta.shape[-1])] em_loss = delta.reshape(bsz, -1).square().mean() else: raise NotImplementedError loss = em_loss log_dict = {"em": em_loss} if self.z_dim != 0: log_dict["kld"] = kld_loss loss = loss + kld_loss if self.global_step % 100 == 0: log_to_current(f"epoch {self.current_epoch} [{batch_idx}/{self.trainer.num_training_batches}] | " + pretty_dict(log_dict, 5)) return loss def on_validation_start(self) -> None: self.evaluate() def validation_step(self, *args, **kwargs): pass def save_ckpt(self): if self.trainer.is_global_zero: save_dir = self._get_save_dir() torch.save(self.vol.state_dict(), os.path.join(save_dir, "ckpt.pt")) # self.history_saved_dirs.append(save_dir) # keep_last_k = 1 # if len(self.history_saved_dirs) >= keep_last_k: # for to_remove in self.history_saved_dirs[:-keep_last_k]: # p = Path(to_remove) / "ckpt.pt" # if p.exists(): # p.unlink() # log_to_current(f"delete {p} to keep last {keep_last_k} ckpts") def evaluate(self) -> None: pixel_size = self.cfg.data_process.down_apix valid_loader = DataLoader(dataset=self.dataset, batch_size=self.cfg.data_loader.val_batch_per_gpu, shuffle=False, drop_last=False, num_workers=12) if self.trainer.is_global_zero: save_dir = self._get_save_dir() self.save_ckpt() if self.z_dim != 0: if self.cfg.extra_input_data_attr.given_z is None: zs = [] for batch in tqdm(iter(valid_loader)): proj_in, proj_out = self.process_image(batch) f_proj_in = primal_to_fourier_2d(proj_in) if self.cfg.model.enc_space == "real": enc_input = einops.rearrange(proj_in, "b 1 ny nx -> b (1 ny nx)").to(self.device) else: enc_input = einops.rearrange(torch.view_as_real(f_proj_in), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2).to(self.device) mu, log_var = self.encoder(enc_input) zs.append(mu.detach().cpu()) # if self.cfg.trainer.devices == 1 and len(zs) > 20: # for _ in range(10): # log_to_current("WARNING!" 
+ "*" * _) # log_to_current( # "since only one device is used, we assume this is a debug mode, and do not go through all validsets" # ) # break zs = torch.cat(zs).cpu().numpy() else: zs = self.given_z.cpu().numpy() np.save(f"{save_dir}/z.npy", zs) kmeans_labels, centers = cluster_kmeans(zs, 10) centers, centers_ind = get_nearest_point(zs, centers) np.savetxt(f"{save_dir}/z_kmeans.txt", centers, fmt='%.5f') np.savetxt(f"{save_dir}/z_kmeans_ind.txt", centers_ind, fmt='%d') centers = torch.from_numpy(centers).to(self.device) for i in range(len(centers)): v = self.vol.make_volume(centers[i:i + 1]) save_mrc(v.cpu().numpy(), f"{save_dir}/vol_kmeans_{i:03}.mrc", pixel_size, -pixel_size * (v.shape[0] // 2)) for pca_dim in range(1, 1 + min(3, self.cfg.model.z_dim)): z_on_pca, z_on_pca_id = sample_along_pca(zs, pca_dim, 10) np.savetxt(f"{save_dir}/z_pca_{pca_dim}.txt", z_on_pca, fmt='%.5f') np.savetxt(f"{save_dir}/z_pca_ind_{pca_dim}.txt", z_on_pca_id, fmt='%d') z_on_pca = torch.from_numpy(z_on_pca).to(self.device) for i in range(len(z_on_pca)): v = self.vol.make_volume(z_on_pca[i:i + 1]) save_mrc(v.cpu().numpy(), f"{save_dir}/vol_pca_{pca_dim}_{i:03}.mrc", pixel_size, -pixel_size * (v.shape[0] // 2)) else: v = self.vol.make_volume(None) save_mrc(v.cpu().numpy(), f"{save_dir}/vol.mrc", pixel_size, -pixel_size * (v.shape[0] // 2)) def on_train_start(self) -> None: if self.trainer.is_global_zero: log_to_current(self) def configure_optimizers(self): return torch.optim.AdamW(self.parameters(), 0.0001) def train(): cfg = pl_init_exp(exp_prefix=TASK_NAME, backup_list=[ __file__, ], inplace=False) dataset = StarfileDataSet(
StarfileDatasetConfig(
1
2023-11-06 07:15:26+00:00
16k
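For reference, below is a minimal sketch of how one row of this dump could be turned into a next-line completion example. It assumes each row is available as a plain Python dict keyed by the field names in the schema at the top (repo_name, file_path, context, import_statement, token_num, cropped_code, all_code, next_line, gold_snippet_index, created_at, level). The concatenation order and the meaning attached to gold_snippet_index are illustrative assumptions, not the dataset authors' prescribed format.

def build_completion_example(row: dict):
    """Assemble a next-line completion prompt and its target from one row.

    Assumption: `row` is a plain dict with the schema fields shown above;
    the concatenation order (context snippets, then imports, then cropped
    code) is an illustrative choice, not a format prescribed by the dataset.
    """
    # Cross-file context: each entry pairs an identifier and its source path
    # with a snippet (see the `context` list of the row above).
    context_blocks = [
        f"# {entry['path']}\n{entry['snippet']}" for entry in row["context"]
    ]
    prompt = "\n\n".join(context_blocks + [row["import_statement"], row["cropped_code"]])
    # The completion target is the single next source line,
    # e.g. "StarfileDatasetConfig(" for the cryostar row above.
    target = row["next_line"]
    # gold_snippet_index appears to index into the `context` list (an
    # assumption based on the field name and its int64 range in the schema).
    gold = row["context"][row["gold_snippet_index"]]
    return prompt, target, gold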
UMass-Foundation-Model/CoVLM
transformers/src/transformers/models/graphormer/configuration_graphormer.py
[ { "identifier": "PretrainedConfig", "path": "transformers/src/transformers/configuration_utils.py", "snippet": "class PretrainedConfig(PushToHubMixin):\n r\"\"\"\n Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as\n methods for loading/downloading/saving configurations.\n\n <Tip>\n\n A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to\n initialize a model does **not** load the model weights. It only affects the model's configuration.\n\n </Tip>\n\n Class attributes (overridden by derived classes):\n\n - **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate\n the correct object in [`~transformers.AutoConfig`].\n - **is_composition** (`bool`) -- Whether the config class is composed of multiple sub-configs. In this case the\n config has to be initialized from two or more configs of type [`~transformers.PretrainedConfig`] like:\n [`~transformers.EncoderDecoderConfig`] or [`~RagConfig`].\n - **keys_to_ignore_at_inference** (`List[str]`) -- A list of keys to ignore by default when looking at dictionary\n outputs of the model during inference.\n - **attribute_map** (`Dict[str, str]`) -- A dict that maps model specific attribute names to the standardized\n naming of attributes.\n\n Common attributes (present in all subclasses):\n\n - **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the\n embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).\n - **hidden_size** (`int`) -- The hidden size of the model.\n - **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the\n model.\n - **num_hidden_layers** (`int`) -- The number of blocks in the model.\n\n Arg:\n name_or_path (`str`, *optional*, defaults to `\"\"`):\n Store the string that was passed to [`PreTrainedModel.from_pretrained`] or\n [`TFPreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path` if the configuration was created\n with such a method.\n output_hidden_states (`bool`, *optional*, defaults to `False`):\n Whether or not the model should return all hidden-states.\n output_attentions (`bool`, *optional*, defaults to `False`):\n Whether or not the model should returns all attentions.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple.\n is_encoder_decoder (`bool`, *optional*, defaults to `False`):\n Whether the model is used as an encoder/decoder or not.\n is_decoder (`bool`, *optional*, defaults to `False`):\n Whether the model is used as decoder or not (in which case it's used as an encoder).\n cross_attention_hidden_size** (`bool`, *optional*):\n The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder\n setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.\n add_cross_attention (`bool`, *optional*, defaults to `False`):\n Whether cross-attention layers should be added to the model. 
Note, this option is only relevant for models\n that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models\n in `AUTO_MODELS_FOR_CAUSAL_LM`.\n tie_encoder_decoder (`bool`, *optional*, defaults to `False`):\n Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder\n and decoder model to have the exact same parameter names.\n prune_heads (`Dict[int, List[int]]`, *optional*, defaults to `{}`):\n Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of\n heads to prune in said layer.\n\n For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.\n chunk_size_feed_forward (`int`, *optional*, defaults to `0`):\n The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that\n the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` <\n sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed\n Forward Chunking work?](../glossary.html#feed-forward-chunking).\n\n > Parameters for sequence generation\n\n max_length (`int`, *optional*, defaults to 20):\n Maximum length that will be used by default in the `generate` method of the model.\n min_length (`int`, *optional*, defaults to 0):\n Minimum length that will be used by default in the `generate` method of the model.\n do_sample (`bool`, *optional*, defaults to `False`):\n Flag that will be used by default in the `generate` method of the model. Whether or not to use sampling ;\n use greedy decoding otherwise.\n early_stopping (`bool`, *optional*, defaults to `False`):\n Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search\n when at least `num_beams` sentences are finished per batch or not.\n num_beams (`int`, *optional*, defaults to 1):\n Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means\n no beam search.\n num_beam_groups (`int`, *optional*, defaults to 1):\n Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams\n that will be used by default in the `generate` method of the model. 1 means no group beam search.\n diversity_penalty (`float`, *optional*, defaults to 0.0):\n Value to control diversity for group beam search. that will be used by default in the `generate` method of\n the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.\n temperature (`float`, *optional*, defaults to 1.0):\n The value used to module the next token probabilities that will be used by default in the `generate` method\n of the model. Must be strictly positive.\n top_k (`int`, *optional*, defaults to 50):\n Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in\n the `generate` method of the model.\n top_p (`float`, *optional*, defaults to 1):\n Value that will be used by default in the `generate` method of the model for `top_p`. 
If set to float < 1,\n only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.\n typical_p (`float`, *optional*, defaults to 1):\n Local typicality measures how similar the conditional probability of predicting a target token next is to\n the expected conditional probability of predicting a random token next, given the partial text already\n generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that\n add up to `typical_p` or higher are kept for generation. See [this\n paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.\n repetition_penalty (`float`, *optional*, defaults to 1):\n Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0\n means no penalty.\n length_penalty (`float`, *optional*, defaults to 1):\n Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to\n the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log\n likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while\n `length_penalty` < 0.0 encourages shorter sequences.\n no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by default in the\n `generate` method of the model for `no_repeat_ngram_size`. If set to int > 0, all ngrams of that size can\n only occur once.\n encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by\n default in the `generate` method of the model for `encoder_no_repeat_ngram_size`. If set to int > 0, all\n ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`.\n bad_words_ids (`List[int]`, *optional*):\n List of token ids that are not allowed to be generated that will be used by default in the `generate`\n method of the model. In order to get the tokens of the words that should not appear in the generated text,\n use `tokenizer.encode(bad_word, add_prefix_space=True)`.\n num_return_sequences (`int`, *optional*, defaults to 1):\n Number of independently computed returned sequences for each element in the batch that will be used by\n default in the `generate` method of the model.\n output_scores (`bool`, *optional*, defaults to `False`):\n Whether the model should return the logits when used for generation.\n return_dict_in_generate (`bool`, *optional*, defaults to `False`):\n Whether the model should return a [`~transformers.utils.ModelOutput`] instead of a `torch.LongTensor`.\n forced_bos_token_id (`int`, *optional*):\n The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for\n multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target\n language token.\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached.\n remove_invalid_values (`bool`, *optional*):\n Whether to remove possible _nan_ and _inf_ outputs of the model to prevent the generation method to crash.\n Note that using `remove_invalid_values` can slow down generation.\n\n > Parameters for fine-tuning tasks\n\n architectures (`List[str]`, *optional*):\n Model architectures that can be used with the model pretrained weights.\n finetuning_task (`str`, *optional*):\n Name of the task used to fine-tune the model. 
This can be used when converting from an original (TensorFlow\n or PyTorch) checkpoint.\n id2label (`Dict[int, str]`, *optional*):\n A map from index (for instance prediction index, or target index) to label.\n label2id (`Dict[str, int]`, *optional*): A map from label to index for the model.\n num_labels (`int`, *optional*):\n Number of labels to use in the last layer added to the model, typically for a classification task.\n task_specific_params (`Dict[str, Any]`, *optional*):\n Additional keyword arguments to store for the current task.\n problem_type (`str`, *optional*):\n Problem type for `XxxForSequenceClassification` models. Can be one of `\"regression\"`,\n `\"single_label_classification\"` or `\"multi_label_classification\"`.\n\n > Parameters linked to the tokenizer\n\n tokenizer_class (`str`, *optional*):\n The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the\n model by default).\n prefix (`str`, *optional*):\n A specific prompt that should be added at the beginning of each text before calling the model.\n bos_token_id (`int`, *optional*): The id of the _beginning-of-stream_ token.\n pad_token_id (`int`, *optional*): The id of the _padding_ token.\n eos_token_id (`int`, *optional*): The id of the _end-of-stream_ token.\n decoder_start_token_id (`int`, *optional*):\n If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.\n sep_token_id (`int`, *optional*): The id of the _separation_ token.\n\n > PyTorch specific parameters\n\n torchscript (`bool`, *optional*, defaults to `False`):\n Whether or not the model should be used with Torchscript.\n tie_word_embeddings (`bool`, *optional*, defaults to `True`):\n Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the\n model has a output word embedding layer.\n torch_dtype (`str`, *optional*):\n The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`\n (which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved\n model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load\n `float16` weights. Since the config object is stored in plain text, this attribute contains just the\n floating type string without the `torch.` prefix. For example, for `torch.float16` ``torch_dtype` is the\n `\"float16\"` string.\n\n This attribute is currently not being used during model loading time, but this may change in the future\n versions. But we can already start preparing for the future by saving the dtype with save_pretrained.\n\n > TensorFlow specific parameters\n\n use_bfloat16 (`bool`, *optional*, defaults to `False`):\n Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).\n tf_legacy_loss (`bool`, *optional*, defaults to `False`):\n Whether the model should use legacy TensorFlow losses. Legacy losses have variable output shapes and may\n not be XLA-compatible. 
This option is here for backward compatibility and will be removed in Transformers\n v5.\n \"\"\"\n model_type: str = \"\"\n is_composition: bool = False\n attribute_map: Dict[str, str] = {}\n _auto_class: Optional[str] = None\n\n def __setattr__(self, key, value):\n if key in super().__getattribute__(\"attribute_map\"):\n key = super().__getattribute__(\"attribute_map\")[key]\n super().__setattr__(key, value)\n\n def __getattribute__(self, key):\n if key != \"attribute_map\" and key in super().__getattribute__(\"attribute_map\"):\n key = super().__getattribute__(\"attribute_map\")[key]\n return super().__getattribute__(key)\n\n def __init__(self, **kwargs):\n # Attributes with defaults\n self.return_dict = kwargs.pop(\"return_dict\", True)\n self.output_hidden_states = kwargs.pop(\"output_hidden_states\", False)\n self.output_attentions = kwargs.pop(\"output_attentions\", False)\n self.torchscript = kwargs.pop(\"torchscript\", False) # Only used by PyTorch models\n self.torch_dtype = kwargs.pop(\"torch_dtype\", None) # Only used by PyTorch models\n self.use_bfloat16 = kwargs.pop(\"use_bfloat16\", False)\n self.tf_legacy_loss = kwargs.pop(\"tf_legacy_loss\", False) # Only used by TensorFlow models\n self.pruned_heads = kwargs.pop(\"pruned_heads\", {})\n self.tie_word_embeddings = kwargs.pop(\n \"tie_word_embeddings\", True\n ) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.\n\n # Is decoder is used in encoder-decoder models to differentiate encoder from decoder\n self.is_encoder_decoder = kwargs.pop(\"is_encoder_decoder\", False)\n self.is_decoder = kwargs.pop(\"is_decoder\", False)\n self.cross_attention_hidden_size = kwargs.pop(\"cross_attention_hidden_size\", None)\n self.add_cross_attention = kwargs.pop(\"add_cross_attention\", False)\n self.tie_encoder_decoder = kwargs.pop(\"tie_encoder_decoder\", False)\n\n # Parameters for sequence generation\n self.max_length = kwargs.pop(\"max_length\", 20)\n self.min_length = kwargs.pop(\"min_length\", 0)\n self.do_sample = kwargs.pop(\"do_sample\", False)\n self.early_stopping = kwargs.pop(\"early_stopping\", False)\n self.num_beams = kwargs.pop(\"num_beams\", 1)\n self.num_beam_groups = kwargs.pop(\"num_beam_groups\", 1)\n self.diversity_penalty = kwargs.pop(\"diversity_penalty\", 0.0)\n self.temperature = kwargs.pop(\"temperature\", 1.0)\n self.top_k = kwargs.pop(\"top_k\", 50)\n self.top_p = kwargs.pop(\"top_p\", 1.0)\n self.typical_p = kwargs.pop(\"typical_p\", 1.0)\n self.repetition_penalty = kwargs.pop(\"repetition_penalty\", 1.0)\n self.length_penalty = kwargs.pop(\"length_penalty\", 1.0)\n self.no_repeat_ngram_size = kwargs.pop(\"no_repeat_ngram_size\", 0)\n self.encoder_no_repeat_ngram_size = kwargs.pop(\"encoder_no_repeat_ngram_size\", 0)\n self.bad_words_ids = kwargs.pop(\"bad_words_ids\", None)\n self.num_return_sequences = kwargs.pop(\"num_return_sequences\", 1)\n self.chunk_size_feed_forward = kwargs.pop(\"chunk_size_feed_forward\", 0)\n self.output_scores = kwargs.pop(\"output_scores\", False)\n self.return_dict_in_generate = kwargs.pop(\"return_dict_in_generate\", False)\n self.forced_bos_token_id = kwargs.pop(\"forced_bos_token_id\", None)\n self.forced_eos_token_id = kwargs.pop(\"forced_eos_token_id\", None)\n self.remove_invalid_values = kwargs.pop(\"remove_invalid_values\", False)\n self.exponential_decay_length_penalty = kwargs.pop(\"exponential_decay_length_penalty\", None)\n self.suppress_tokens = kwargs.pop(\"suppress_tokens\", None)\n self.begin_suppress_tokens = 
kwargs.pop(\"begin_suppress_tokens\", None)\n\n # Fine-tuning task arguments\n self.architectures = kwargs.pop(\"architectures\", None)\n self.finetuning_task = kwargs.pop(\"finetuning_task\", None)\n self.id2label = kwargs.pop(\"id2label\", None)\n self.label2id = kwargs.pop(\"label2id\", None)\n if self.label2id is not None and not isinstance(self.label2id, dict):\n raise ValueError(\"Argument label2id should be a dictionary.\")\n if self.id2label is not None:\n if not isinstance(self.id2label, dict):\n raise ValueError(\"Argument id2label should be a dictionary.\")\n num_labels = kwargs.pop(\"num_labels\", None)\n if num_labels is not None and len(self.id2label) != num_labels:\n logger.warning(\n f\"You passed along `num_labels={num_labels}` with an incompatible id to label map: \"\n f\"{self.id2label}. The number of labels wil be overwritten to {self.num_labels}.\"\n )\n self.id2label = {int(key): value for key, value in self.id2label.items()}\n # Keys are always strings in JSON so convert ids to int here.\n else:\n self.num_labels = kwargs.pop(\"num_labels\", 2)\n\n if self.torch_dtype is not None and isinstance(self.torch_dtype, str):\n # we will start using self.torch_dtype in v5, but to be consistent with\n # from_pretrained's torch_dtype arg convert it to an actual torch.dtype object\n if is_torch_available():\n import torch\n\n self.torch_dtype = getattr(torch, self.torch_dtype)\n\n # Tokenizer arguments TODO: eventually tokenizer and models should share the same config\n self.tokenizer_class = kwargs.pop(\"tokenizer_class\", None)\n self.prefix = kwargs.pop(\"prefix\", None)\n self.bos_token_id = kwargs.pop(\"bos_token_id\", None)\n self.pad_token_id = kwargs.pop(\"pad_token_id\", None)\n self.eos_token_id = kwargs.pop(\"eos_token_id\", None)\n self.sep_token_id = kwargs.pop(\"sep_token_id\", None)\n\n self.decoder_start_token_id = kwargs.pop(\"decoder_start_token_id\", None)\n\n # task specific arguments\n self.task_specific_params = kwargs.pop(\"task_specific_params\", None)\n\n # regression / multi-label classification\n self.problem_type = kwargs.pop(\"problem_type\", None)\n allowed_problem_types = (\"regression\", \"single_label_classification\", \"multi_label_classification\")\n if self.problem_type is not None and self.problem_type not in allowed_problem_types:\n raise ValueError(\n f\"The config parameter `problem_type` was not understood: received {self.problem_type} \"\n \"but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid.\"\n )\n\n # TPU arguments\n if kwargs.pop(\"xla_device\", None) is not None:\n logger.warning(\n \"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can \"\n \"safely remove it from your `config.json` file.\"\n )\n\n # Name or path to the pretrained checkpoint\n self._name_or_path = str(kwargs.pop(\"name_or_path\", \"\"))\n # Config hash\n self._commit_hash = kwargs.pop(\"_commit_hash\", None)\n\n # Drop the transformers version info\n self.transformers_version = kwargs.pop(\"transformers_version\", None)\n\n # Deal with gradient checkpointing\n if kwargs.get(\"gradient_checkpointing\", False):\n warnings.warn(\n \"Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 \"\n \"Transformers. 
Using `model.gradient_checkpointing_enable()` instead, or if you are using the \"\n \"`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`.\"\n )\n\n # Additional attributes without default values\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(f\"Can't set {key} with value {value} for {self}\")\n raise err\n\n @property\n def name_or_path(self) -> str:\n return getattr(self, \"_name_or_path\", None)\n\n @name_or_path.setter\n def name_or_path(self, value):\n self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)\n\n @property\n def use_return_dict(self) -> bool:\n \"\"\"\n `bool`: Whether or not return [`~utils.ModelOutput`] instead of tuples.\n \"\"\"\n # If torchscript is set, force `return_dict=False` to avoid jit errors\n return self.return_dict and not self.torchscript\n\n @property\n def num_labels(self) -> int:\n \"\"\"\n `int`: The number of labels for classification models.\n \"\"\"\n return len(self.id2label)\n\n @num_labels.setter\n def num_labels(self, num_labels: int):\n if not hasattr(self, \"id2label\") or self.id2label is None or len(self.id2label) != num_labels:\n self.id2label = {i: f\"LABEL_{i}\" for i in range(num_labels)}\n self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~PretrainedConfig.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face model hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n self._set_token_in_kwargs(kwargs)\n\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = self._create_repo(repo_id, **kwargs)\n files_timestamps = self._get_files_timestamps(save_directory)\n\n # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be\n # loaded from the Hub.\n if self._auto_class is not None:\n custom_object_save(self, save_directory, config=self)\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_config_file = os.path.join(save_directory, CONFIG_NAME)\n\n self.to_json_file(output_config_file, use_diff=True)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n self._upload_modified_files(\n save_directory,\n repo_id,\n files_timestamps,\n commit_message=commit_message,\n token=kwargs.get(\"token\"),\n )\n\n @staticmethod\n def _set_token_in_kwargs(kwargs, token=None):\n \"\"\"Temporary method to deal with `token` and `use_auth_token`.\n\n This method is to avoid apply the same changes in all model config classes that overwrite `from_pretrained`.\n\n Need to clean up `use_auth_token` in a follow PR.\n \"\"\"\n # Some model config classes like CLIP define their own `from_pretrained` without the new argument `token` yet.\n if token is None:\n token = kwargs.pop(\"token\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n\n if use_auth_token is not None:\n warnings.warn(\n \"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers.\", FutureWarning\n )\n if token is not None:\n raise ValueError(\n \"`token` and `use_auth_token` are both specified. Please set only the argument `token`.\"\n )\n token = use_auth_token\n\n if token is not None:\n kwargs[\"token\"] = token\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n cache_dir: Optional[Union[str, os.PathLike]] = None,\n force_download: bool = False,\n local_files_only: bool = False,\n token: Optional[Union[str, bool]] = None,\n revision: str = \"main\",\n **kwargs,\n ) -> \"PretrainedConfig\":\n r\"\"\"\n Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.\n\n Args:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n This can be either:\n\n - a string, the *model id* of a pretrained model configuration hosted inside a model repo on\n huggingface.co. 
Valid model ids can be located at the root-level, like `bert-base-uncased`, or\n namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.\n - a path to a *directory* containing a configuration file saved using the\n [`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.\n - a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.\n cache_dir (`str` or `os.PathLike`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force to (re-)download the configuration files and override the cached versions if\n they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received file. Attempts to resume the download if such a file\n exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.\n token (`str` or `bool`, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use\n the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n\n <Tip>\n\n To test a pull request you made on the Hub, you can pass `revision=\"refs/pr/<pr_number>\".\n\n </Tip>\n\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n If `False`, then this function returns just the final configuration object.\n\n If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a\n dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the\n part of `kwargs` which has not been used to update `config` and is otherwise ignored.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can\n specify the folder name here.\n kwargs (`Dict[str, Any]`, *optional*):\n The values in kwargs of any keys which are configuration attributes will be used to override the loaded\n values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled\n by the `return_unused_kwargs` keyword parameter.\n\n Returns:\n [`PretrainedConfig`]: The configuration object instantiated from this pretrained model.\n\n Examples:\n\n ```python\n # We can't instantiate directly the base class *PretrainedConfig* so let's show the examples on a\n # derived class: BertConfig\n config = BertConfig.from_pretrained(\n \"bert-base-uncased\"\n ) # Download configuration from huggingface.co and cache.\n config = BertConfig.from_pretrained(\n \"./test/saved_model/\"\n ) # E.g. 
config (or model) was saved using *save_pretrained('./test/saved_model/')*\n config = BertConfig.from_pretrained(\"./test/saved_model/my_configuration.json\")\n config = BertConfig.from_pretrained(\"bert-base-uncased\", output_attentions=True, foo=False)\n assert config.output_attentions == True\n config, unused_kwargs = BertConfig.from_pretrained(\n \"bert-base-uncased\", output_attentions=True, foo=False, return_unused_kwargs=True\n )\n assert config.output_attentions == True\n assert unused_kwargs == {\"foo\": False}\n ```\"\"\"\n kwargs[\"cache_dir\"] = cache_dir\n kwargs[\"force_download\"] = force_download\n kwargs[\"local_files_only\"] = local_files_only\n kwargs[\"revision\"] = revision\n\n cls._set_token_in_kwargs(kwargs, token)\n\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)\n\n @classmethod\n def get_config_dict(\n cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n \"\"\"\n From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a\n [`PretrainedConfig`] using `from_dict`.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.\n\n Returns:\n `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.\n\n \"\"\"\n cls._set_token_in_kwargs(kwargs)\n\n original_kwargs = copy.deepcopy(kwargs)\n # Get config dict associated with the base config file\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n if \"_commit_hash\" in config_dict:\n original_kwargs[\"_commit_hash\"] = config_dict[\"_commit_hash\"]\n\n # That config file may point us toward another config file to use.\n if \"configuration_files\" in config_dict:\n configuration_file = get_configuration_file(config_dict[\"configuration_files\"])\n config_dict, kwargs = cls._get_config_dict(\n pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs\n )\n\n return config_dict, kwargs\n\n @classmethod\n def _get_config_dict(\n cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n token = kwargs.pop(\"token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n trust_remote_code = kwargs.pop(\"trust_remote_code\", None)\n subfolder = kwargs.pop(\"subfolder\", \"\")\n from_pipeline = kwargs.pop(\"_from_pipeline\", None)\n from_auto_class = kwargs.pop(\"_from_auto\", False)\n commit_hash = kwargs.pop(\"_commit_hash\", None)\n\n if trust_remote_code is True:\n logger.warning(\n \"The argument `trust_remote_code` is to be used with Auto classes. 
It has no effect here and is\"\n \" ignored.\"\n )\n\n user_agent = {\"file_type\": \"config\", \"from_auto_class\": from_auto_class}\n if from_pipeline is not None:\n user_agent[\"using_pipeline\"] = from_pipeline\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n is_local = os.path.isdir(pretrained_model_name_or_path)\n if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):\n # Special case when pretrained_model_name_or_path is a local file\n resolved_config_file = pretrained_model_name_or_path\n is_local = True\n elif is_remote_url(pretrained_model_name_or_path):\n configuration_file = pretrained_model_name_or_path\n resolved_config_file = download_url(pretrained_model_name_or_path)\n else:\n configuration_file = kwargs.pop(\"_configuration_file\", CONFIG_NAME)\n\n try:\n # Load from local folder or from cache or download from model Hub and cache\n resolved_config_file = cached_file(\n pretrained_model_name_or_path,\n configuration_file,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n token=token,\n user_agent=user_agent,\n revision=revision,\n subfolder=subfolder,\n _commit_hash=commit_hash,\n )\n commit_hash = extract_commit_hash(resolved_config_file, commit_hash)\n except EnvironmentError:\n # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to\n # the original exception.\n raise\n except Exception:\n # For any other exception, we throw a generic error.\n raise EnvironmentError(\n f\"Can't load the configuration of '{pretrained_model_name_or_path}'. If you were trying to load it\"\n \" from 'https://huggingface.co/models', make sure you don't have a local directory with the same\"\n f\" name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory\"\n f\" containing a {configuration_file} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(resolved_config_file)\n config_dict[\"_commit_hash\"] = commit_hash\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(\n f\"It looks like the config file at '{resolved_config_file}' is not a valid JSON file.\"\n )\n\n if is_local:\n logger.info(f\"loading configuration file {resolved_config_file}\")\n else:\n logger.info(f\"loading configuration file {configuration_file} from cache at {resolved_config_file}\")\n\n if \"auto_map\" in config_dict and not is_local:\n config_dict[\"auto_map\"] = add_model_info_to_auto_map(\n config_dict[\"auto_map\"], pretrained_model_name_or_path\n )\n return config_dict, kwargs\n\n @classmethod\n def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> \"PretrainedConfig\":\n \"\"\"\n Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.\n\n Args:\n config_dict (`Dict[str, Any]`):\n Dictionary that will be used to instantiate the configuration object. 
Such a dictionary can be\n retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.\n kwargs (`Dict[str, Any]`):\n Additional parameters from which to initialize the configuration object.\n\n Returns:\n [`PretrainedConfig`]: The configuration object instantiated from those parameters.\n \"\"\"\n return_unused_kwargs = kwargs.pop(\"return_unused_kwargs\", False)\n # Those arguments may be passed along for our internal telemetry.\n # We remove them so they don't appear in `return_unused_kwargs`.\n kwargs.pop(\"_from_auto\", None)\n kwargs.pop(\"_from_pipeline\", None)\n # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update.\n if \"_commit_hash\" in kwargs and \"_commit_hash\" in config_dict:\n kwargs[\"_commit_hash\"] = config_dict[\"_commit_hash\"]\n\n config = cls(**config_dict)\n\n if hasattr(config, \"pruned_heads\"):\n config.pruned_heads = {int(key): value for key, value in config.pruned_heads.items()}\n\n # Update config with kwargs if needed\n if \"num_labels\" in kwargs and \"id2label\" in kwargs:\n num_labels = kwargs[\"num_labels\"]\n id2label = kwargs[\"id2label\"] if kwargs[\"id2label\"] is not None else []\n if len(id2label) != num_labels:\n raise ValueError(\n f\"You passed along `num_labels={num_labels }` with an incompatible id to label map: \"\n f\"{kwargs['id2label']}. Since those arguments are inconsistent with each other, you should remove \"\n \"one of them.\"\n )\n to_remove = []\n for key, value in kwargs.items():\n if hasattr(config, key):\n current_attr = getattr(config, key)\n # To authorize passing a custom subconfig as kwarg in models that have nested configs.\n if isinstance(current_attr, PretrainedConfig) and isinstance(value, dict):\n value = current_attr.__class__(**value)\n setattr(config, key, value)\n if key != \"torch_dtype\":\n to_remove.append(key)\n for key in to_remove:\n kwargs.pop(key, None)\n\n logger.info(f\"Model config {config}\")\n if return_unused_kwargs:\n return config, kwargs\n else:\n return config\n\n @classmethod\n def from_json_file(cls, json_file: Union[str, os.PathLike]) -> \"PretrainedConfig\":\n \"\"\"\n Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.\n\n Args:\n json_file (`str` or `os.PathLike`):\n Path to the JSON file containing the parameters.\n\n Returns:\n [`PretrainedConfig`]: The configuration object instantiated from that JSON file.\n\n \"\"\"\n config_dict = cls._dict_from_json_file(json_file)\n return cls(**config_dict)\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __eq__(self, other):\n return isinstance(other, PretrainedConfig) and (self.__dict__ == other.__dict__)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n def to_diff_dict(self) -> Dict[str, Any]:\n \"\"\"\n Removes all attributes from config which correspond to the default config attributes for better readability and\n serializes to a Python dictionary.\n\n Returns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n config_dict = self.to_dict()\n\n # get the default config dict\n default_config_dict = PretrainedConfig().to_dict()\n\n # get class specific config dict\n class_config_dict = self.__class__().to_dict() if not self.is_composition else {}\n\n serializable_config_dict = 
{}\n\n # only serialize values that differ from the default config\n for key, value in config_dict.items():\n if (\n isinstance(getattr(self, key, None), PretrainedConfig)\n and key in class_config_dict\n and isinstance(class_config_dict[key], dict)\n ):\n # For nested configs we need to clean the diff recursively\n diff = recursive_diff_dict(value, class_config_dict[key], config_obj=getattr(self, key, None))\n if \"model_type\" in value:\n # Needs to be set even if it's not in the diff\n diff[\"model_type\"] = value[\"model_type\"]\n if len(diff) > 0:\n serializable_config_dict[key] = diff\n elif (\n key not in default_config_dict\n or key == \"transformers_version\"\n or value != default_config_dict[key]\n or (key in class_config_dict and value != class_config_dict[key])\n ):\n serializable_config_dict[key] = value\n\n if hasattr(self, \"quantization_config\"):\n serializable_config_dict[\"quantization_config\"] = (\n self.quantization_config.to_dict()\n if not isinstance(self.quantization_config, dict)\n else self.quantization_config\n )\n\n self.dict_torch_dtype_to_str(serializable_config_dict)\n\n return serializable_config_dict\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes this instance to a Python dictionary.\n\n Returns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n if hasattr(self.__class__, \"model_type\"):\n output[\"model_type\"] = self.__class__.model_type\n if \"_auto_class\" in output:\n del output[\"_auto_class\"]\n if \"_commit_hash\" in output:\n del output[\"_commit_hash\"]\n\n # Transformers version when serializing the model\n output[\"transformers_version\"] = __version__\n\n for key, value in output.items():\n # Deal with nested configs like CLIP\n if isinstance(value, PretrainedConfig):\n value = value.to_dict()\n del value[\"transformers_version\"]\n\n output[key] = value\n\n if hasattr(self, \"quantization_config\"):\n output[\"quantization_config\"] = (\n self.quantization_config.to_dict()\n if not isinstance(self.quantization_config, dict)\n else self.quantization_config\n )\n\n self.dict_torch_dtype_to_str(output)\n\n return output\n\n def to_json_string(self, use_diff: bool = True) -> str:\n \"\"\"\n Serializes this instance to a JSON string.\n\n Args:\n use_diff (`bool`, *optional*, defaults to `True`):\n If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`\n is serialized to JSON string.\n\n Returns:\n `str`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n if use_diff is True:\n config_dict = self.to_diff_dict()\n else:\n config_dict = self.to_dict()\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):\n \"\"\"\n Save this instance to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file in which this configuration instance's parameters will be saved.\n use_diff (`bool`, *optional*, defaults to `True`):\n If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`\n is serialized to JSON file.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string(use_diff=use_diff))\n\n def update(self, config_dict: Dict[str, Any]):\n \"\"\"\n Updates attributes of this class with attributes from `config_dict`.\n\n 
Args:\n config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.\n \"\"\"\n for key, value in config_dict.items():\n setattr(self, key, value)\n\n def update_from_string(self, update_str: str):\n \"\"\"\n Updates attributes of this class with attributes from `update_str`.\n\n The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:\n \"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index\"\n\n The keys to change have to already exist in the config object.\n\n Args:\n update_str (`str`): String with attributes that should be updated for this class.\n\n \"\"\"\n\n d = dict(x.split(\"=\") for x in update_str.split(\",\"))\n for k, v in d.items():\n if not hasattr(self, k):\n raise ValueError(f\"key {k} isn't in the original config dict\")\n\n old_v = getattr(self, k)\n if isinstance(old_v, bool):\n if v.lower() in [\"true\", \"1\", \"y\", \"yes\"]:\n v = True\n elif v.lower() in [\"false\", \"0\", \"n\", \"no\"]:\n v = False\n else:\n raise ValueError(f\"can't derive true or false from {v} (key {k})\")\n elif isinstance(old_v, int):\n v = int(v)\n elif isinstance(old_v, float):\n v = float(v)\n elif not isinstance(old_v, str):\n raise ValueError(\n f\"You can only update int, float, bool or string values in the config, got {v} for key {k}\"\n )\n\n setattr(self, k, v)\n\n def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:\n \"\"\"\n Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,\n converts torch.dtype to a string of just the type. For example, `torch.float32` get converted into *\"float32\"*\n string, which can then be stored in the json format.\n \"\"\"\n if d.get(\"torch_dtype\", None) is not None and not isinstance(d[\"torch_dtype\"], str):\n d[\"torch_dtype\"] = str(d[\"torch_dtype\"]).split(\".\")[1]\n for value in d.values():\n if isinstance(value, dict):\n self.dict_torch_dtype_to_str(value)\n\n @classmethod\n def register_for_auto_class(cls, auto_class=\"AutoConfig\"):\n \"\"\"\n Register this class with a given auto class. 
This should only be used for custom configurations as the ones in\n the library are already mapped with `AutoConfig`.\n\n <Tip warning={true}>\n\n This API is experimental and may have some slight breaking changes in the next releases.\n\n </Tip>\n\n Args:\n auto_class (`str` or `type`, *optional*, defaults to `\"AutoConfig\"`):\n The auto class to register this new configuration with.\n \"\"\"\n if not isinstance(auto_class, str):\n auto_class = auto_class.__name__\n\n import transformers.models.auto as auto_module\n\n if not hasattr(auto_module, auto_class):\n raise ValueError(f\"{auto_class} is not a valid auto class.\")\n\n cls._auto_class = auto_class" }, { "identifier": "logging", "path": "transformers/src/transformers/utils/logging.py", "snippet": "def _get_default_logging_level():\ndef _get_library_name() -> str:\ndef _get_library_root_logger() -> logging.Logger:\ndef _configure_library_root_logger() -> None:\ndef _reset_library_root_logger() -> None:\ndef get_log_levels_dict():\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\ndef get_verbosity() -> int:\ndef set_verbosity(verbosity: int) -> None:\ndef set_verbosity_info():\ndef set_verbosity_warning():\ndef set_verbosity_debug():\ndef set_verbosity_error():\ndef disable_default_handler() -> None:\ndef enable_default_handler() -> None:\ndef add_handler(handler: logging.Handler) -> None:\ndef remove_handler(handler: logging.Handler) -> None:\ndef disable_propagation() -> None:\ndef enable_propagation() -> None:\ndef enable_explicit_format() -> None:\ndef reset_format() -> None:\ndef warning_advice(self, *args, **kwargs):\ndef warning_once(self, *args, **kwargs):\n def __init__(self, *args, **kwargs): # pylint: disable=unused-argument\n def __iter__(self):\n def __getattr__(self, _):\n def empty_fn(*args, **kwargs): # pylint: disable=unused-argument\n def __enter__(self):\n def __exit__(self, type_, value, traceback):\n def __call__(self, *args, **kwargs):\n def set_lock(self, *args, **kwargs):\n def get_lock(self):\ndef is_progress_bar_enabled() -> bool:\ndef enable_progress_bar():\ndef disable_progress_bar():\nclass EmptyTqdm:\nclass _tqdm_cls:" } ]
from ...configuration_utils import PretrainedConfig from ...utils import logging
12,439
# coding=utf-8 # Copyright 2022 Microsoft, clefourrier and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Graphormer model configuration""" logger = logging.get_logger(__name__) GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { # pcqm4mv1 now deprecated "graphormer-base": "https://huggingface.co/clefourrier/graphormer-base-pcqm4mv2/resolve/main/config.json", # See all Graphormer models at https://huggingface.co/models?filter=graphormer }
# coding=utf-8 # Copyright 2022 Microsoft, clefourrier and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Graphormer model configuration""" logger = logging.get_logger(__name__) GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { # pcqm4mv1 now deprecated "graphormer-base": "https://huggingface.co/clefourrier/graphormer-base-pcqm4mv2/resolve/main/config.json", # See all Graphormer models at https://huggingface.co/models?filter=graphormer }
class GraphormerConfig(PretrainedConfig):
0
2023-11-07 04:23:57+00:00
16k
HKU-BAL/ClairS-TO
src/realign_reads.py
[ { "identifier": "subprocess_popen", "path": "shared/utils.py", "snippet": "BASIC_BASES = set(\"ACGTU\")\nWARNING = '\\033[93m'\nERROR = '\\033[91m'\nENDC = '\\033[0m'\ndef log_error(log):\ndef log_warning(log):\ndef is_file_exists(file_name, suffix=\"\"):\ndef is_folder_exists(folder_name, suffix=\"\"):\ndef legal_range_from(param_name, x, min_num=None, max_num=None, exit_out_of_range=False):\ndef file_path_from(file_name, suffix=\"\", exit_on_not_found=False, sep=\"\", allow_none=False, is_directory=False):\ndef folder_path_from(folder_name, create_not_found=True, exit_on_not_found=False):\ndef is_command_exists(command):\ndef executable_command_string_from(command_to_execute, exit_on_not_found=False):\ndef subprocess_popen(args, stdin=None, stdout=PIPE, stderr=stderr, bufsize=8388608):\ndef str_none(v):\ndef str2bool(v):\ndef region_from(ctg_name, ctg_start=None, ctg_end=None):\ndef reference_sequence_from(samtools_execute_command, fasta_file_path, regions):\ndef vcf_candidates_from(vcf_fn, contig_name=None):\ndef candidate_position_generator_from(\n candidate,\n flanking_base_num,\n begin_to_end\n):\ndef samtools_mpileup_generator_from(\n candidate,\n flanking_base_num,\n begin_to_end\n):\ndef samtools_view_process_from(\n ctg_name,\n ctg_start,\n ctg_end,\n samtools,\n bam_file_path\n):\n def __init__(self, ctg_name=None,\n genotype1=None,\n genotype2=None,\n pos=None,\n ref_base=None,\n alt_base=None,\n candidate=False,\n cigar_count=None,\n confident_variant=False,\n depth=None,\n alt_list=None,\n af=None,\n filter=None,\n af_list=None,\n alt_type_mapping_dict=None,\n extra_infos=\"\",\n qual=None,\n row_str=None):\n def update_info(self, ref_base, alt_base, genotype, extra_infos=\"\"):\n def __init__(self, pos, ref_base, depth, af_list, alt_dict, tumor_alt_dict, extra_infos=\"\"):\n def __init__(self, handle):\n def __del__(self):\nclass Position(object):\nclass AltInfos(object):\nclass TensorStdout(object):" }, { "identifier": "bed_tree_from", "path": "shared/interval_tree.py", "snippet": "def bed_tree_from(bed_file_path,\n expand_region=None,\n contig_name=None,\n bed_ctg_start=None,\n bed_ctg_end=None,\n return_bed_region=False,\n padding=None,\n region=None):\n \"\"\"\n 0-based interval tree [start, end)\n \"\"\"\n\n tree = {}\n if region is not None:\n try:\n ctg_name, start_end = region.split(':')\n ctg_start, ctg_end = int(start_end.split('-')[0]) - 1, int(start_end.split('-')[1]) - 1 # bed format\n except:\n sys.exit(\"[ERROR] Please input the correct format for --region ctg_name:start-end, your input is {}\".format(region))\n if ctg_end < ctg_start or ctg_start < 0 or ctg_end < 0:\n sys.exit(\"[ERROR] Invalid region input: {}\".format(region))\n\n if ctg_name not in tree:\n tree[ctg_name] = IntervalTree()\n tree[ctg_name].addi(ctg_start, ctg_end)\n if return_bed_region:\n return tree, None, None\n return tree\n\n if bed_file_path is None or bed_file_path == \"\":\n if return_bed_region:\n return tree, None, None\n return tree\n\n bed_start, bed_end = float('inf'), 0\n unzip_process = subprocess_popen(shlex.split(\"gzip -fdc %s\" % (bed_file_path)))\n for row_id, row in enumerate(unzip_process.stdout):\n if row[0] == '#':\n continue\n columns = row.strip().split()\n\n ctg_name = columns[0]\n if contig_name != None and ctg_name != contig_name:\n continue\n if ctg_name not in tree:\n tree[ctg_name] = IntervalTree()\n\n ctg_start, ctg_end = int(columns[1]), int(columns[2])\n\n if ctg_end < ctg_start or ctg_start < 0 or ctg_end < 0:\n sys.exit(\"[ERROR] Invalid bed input in 
{}-th row {} {} {}\".format(row_id+1, ctg_name, ctg_start, ctg_end))\n\n if bed_ctg_start and bed_ctg_end:\n if ctg_end < bed_ctg_start or ctg_start > bed_ctg_end:\n continue\n if padding:\n ctg_start += padding\n ctg_end -= padding\n bed_start = min(ctg_start, bed_start)\n bed_end = max(ctg_end, bed_end)\n if ctg_start == ctg_end:\n ctg_end += 1\n\n tree[ctg_name].addi(ctg_start, ctg_end)\n\n unzip_process.stdout.close()\n unzip_process.wait()\n if return_bed_region:\n return tree, bed_start, bed_end\n return tree" }, { "identifier": "IntervalTree", "path": "shared/intervaltree/intervaltree.py", "snippet": "class IntervalTree(MutableSet):\n \"\"\"\n A binary lookup tree of intervals.\n The intervals contained in the tree are represented using ``Interval(a, b, data)`` objects.\n Each such object represents a half-open interval ``[a, b)`` with optional data.\n\n Examples:\n ---------\n\n Initialize a blank tree::\n\n >>> tree = IntervalTree()\n >>> tree\n IntervalTree()\n\n Initialize a tree from an iterable set of Intervals in O(n * log n)::\n\n >>> tree = IntervalTree([Interval(-10, 10), Interval(-20.0, -10.0)])\n >>> tree\n IntervalTree([Interval(-20.0, -10.0), Interval(-10, 10)])\n >>> len(tree)\n 2\n\n Note that this is a set, i.e. repeated intervals are ignored. However,\n Intervals with different data fields are regarded as different::\n\n >>> tree = IntervalTree([Interval(-10, 10), Interval(-10, 10), Interval(-10, 10, \"x\")])\n >>> tree\n IntervalTree([Interval(-10, 10), Interval(-10, 10, 'x')])\n >>> len(tree)\n 2\n\n Insertions::\n >>> tree = IntervalTree()\n >>> tree[0:1] = \"data\"\n >>> tree.add(Interval(10, 20))\n >>> tree.addi(19.9, 20)\n >>> tree\n IntervalTree([Interval(0, 1, 'data'), Interval(10, 20), Interval(19.9, 20)])\n >>> tree.update([Interval(19.9, 20.1), Interval(20.1, 30)])\n >>> len(tree)\n 5\n\n Inserting the same Interval twice does nothing::\n >>> tree = IntervalTree()\n >>> tree[-10:20] = \"arbitrary data\"\n >>> tree[-10:20] = None # Note that this is also an insertion\n >>> tree\n IntervalTree([Interval(-10, 20), Interval(-10, 20, 'arbitrary data')])\n >>> tree[-10:20] = None # This won't change anything\n >>> tree[-10:20] = \"arbitrary data\" # Neither will this\n >>> len(tree)\n 2\n\n Deletions::\n >>> tree = IntervalTree(Interval(b, e) for b, e in [(-10, 10), (-20, -10), (10, 20)])\n >>> tree\n IntervalTree([Interval(-20, -10), Interval(-10, 10), Interval(10, 20)])\n >>> tree.remove(Interval(-10, 10))\n >>> tree\n IntervalTree([Interval(-20, -10), Interval(10, 20)])\n >>> tree.remove(Interval(-10, 10))\n Traceback (most recent call last):\n ...\n ValueError\n >>> tree.discard(Interval(-10, 10)) # Same as remove, but no exception on failure\n >>> tree\n IntervalTree([Interval(-20, -10), Interval(10, 20)])\n\n Delete intervals, overlapping a given point::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> tree.remove_overlap(1.1)\n >>> tree\n IntervalTree([Interval(-1.1, 1.1)])\n\n Delete intervals, overlapping an interval::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> tree.remove_overlap(0, 0.5)\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_overlap(1.7, 1.8)\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_overlap(1.6, 1.6) # Null interval does nothing\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_overlap(1.6, 1.5) # Ditto\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n\n Delete intervals, 
enveloped in the range::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> tree.remove_envelop(-1.0, 1.5)\n >>> tree\n IntervalTree([Interval(-1.1, 1.1), Interval(0.5, 1.7)])\n >>> tree.remove_envelop(-1.1, 1.5)\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_envelop(0.5, 1.5)\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_envelop(0.5, 1.7)\n >>> tree\n IntervalTree()\n\n Point queries::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> assert tree[-1.1] == set([Interval(-1.1, 1.1)])\n >>> assert tree.at(1.1) == set([Interval(-0.5, 1.5), Interval(0.5, 1.7)]) # Same as tree[1.1]\n >>> assert tree.at(1.5) == set([Interval(0.5, 1.7)]) # Same as tree[1.5]\n\n Interval overlap queries\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> assert tree.overlap(1.7, 1.8) == set()\n >>> assert tree.overlap(1.5, 1.8) == set([Interval(0.5, 1.7)])\n >>> assert tree[1.5:1.8] == set([Interval(0.5, 1.7)]) # same as previous\n >>> assert tree.overlap(1.1, 1.8) == set([Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> assert tree[1.1:1.8] == set([Interval(-0.5, 1.5), Interval(0.5, 1.7)]) # same as previous\n\n Interval envelop queries::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> assert tree.envelop(-0.5, 0.5) == set()\n >>> assert tree.envelop(-0.5, 1.5) == set([Interval(-0.5, 1.5)])\n\n Membership queries::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> Interval(-0.5, 0.5) in tree\n False\n >>> Interval(-1.1, 1.1) in tree\n True\n >>> Interval(-1.1, 1.1, \"x\") in tree\n False\n >>> tree.overlaps(-1.1)\n True\n >>> tree.overlaps(1.7)\n False\n >>> tree.overlaps(1.7, 1.8)\n False\n >>> tree.overlaps(-1.2, -1.1)\n False\n >>> tree.overlaps(-1.2, -1.0)\n True\n\n Sizing::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> len(tree)\n 3\n >>> tree.is_empty()\n False\n >>> IntervalTree().is_empty()\n True\n >>> not tree\n False\n >>> not IntervalTree()\n True\n >>> print(tree.begin()) # using print() because of floats in Python 2.6\n -1.1\n >>> print(tree.end()) # ditto\n 1.7\n\n Iteration::\n\n >>> tree = IntervalTree([Interval(-11, 11), Interval(-5, 15), Interval(5, 17)])\n >>> [iv.begin for iv in sorted(tree)]\n [-11, -5, 5]\n >>> assert tree.items() == set([Interval(-5, 15), Interval(-11, 11), Interval(5, 17)])\n\n Copy- and typecasting, pickling::\n\n >>> tree0 = IntervalTree([Interval(0, 1, \"x\"), Interval(1, 2, [\"x\"])])\n >>> tree1 = IntervalTree(tree0) # Shares Interval objects\n >>> tree2 = tree0.copy() # Shallow copy (same as above, as Intervals are singletons)\n >>> import pickle\n >>> tree3 = pickle.loads(pickle.dumps(tree0)) # Deep copy\n >>> list(tree0[1])[0].data[0] = \"y\" # affects shallow copies, but not deep copies\n >>> tree0\n IntervalTree([Interval(0, 1, 'x'), Interval(1, 2, ['y'])])\n >>> tree1\n IntervalTree([Interval(0, 1, 'x'), Interval(1, 2, ['y'])])\n >>> tree2\n IntervalTree([Interval(0, 1, 'x'), Interval(1, 2, ['y'])])\n >>> tree3\n IntervalTree([Interval(0, 1, 'x'), Interval(1, 2, ['x'])])\n\n Equality testing::\n\n >>> IntervalTree([Interval(0, 1)]) == IntervalTree([Interval(0, 1)])\n True\n >>> IntervalTree([Interval(0, 1)]) == IntervalTree([Interval(0, 1, \"x\")])\n False\n \"\"\"\n @classmethod\n def from_tuples(cls, tups):\n \"\"\"\n Create a new 
IntervalTree from an iterable of 2- or 3-tuples,\n where the tuple lists begin, end, and optionally data.\n \"\"\"\n ivs = [Interval(*t) for t in tups]\n return IntervalTree(ivs)\n\n def __init__(self, intervals=None):\n \"\"\"\n Set up a tree. If intervals is provided, add all the intervals\n to the tree.\n\n Completes in O(n*log n) time.\n \"\"\"\n intervals = set(intervals) if intervals is not None else set()\n for iv in intervals:\n if iv.is_null():\n raise ValueError(\n \"IntervalTree: Null Interval objects not allowed in IntervalTree:\"\n \" {0}\".format(iv)\n )\n self.all_intervals = intervals\n self.top_node = Node.from_intervals(self.all_intervals)\n self.boundary_table = SortedDict()\n for iv in self.all_intervals:\n self._add_boundaries(iv)\n\n def copy(self):\n \"\"\"\n Construct a new IntervalTree using shallow copies of the\n intervals in the source tree.\n\n Completes in O(n*log n) time.\n :rtype: IntervalTree\n \"\"\"\n return IntervalTree(iv.copy() for iv in self)\n\n def _add_boundaries(self, interval):\n \"\"\"\n Records the boundaries of the interval in the boundary table.\n \"\"\"\n begin = interval.begin\n end = interval.end\n if begin in self.boundary_table:\n self.boundary_table[begin] += 1\n else:\n self.boundary_table[begin] = 1\n\n if end in self.boundary_table:\n self.boundary_table[end] += 1\n else:\n self.boundary_table[end] = 1\n\n def _remove_boundaries(self, interval):\n \"\"\"\n Removes the boundaries of the interval from the boundary table.\n \"\"\"\n begin = interval.begin\n end = interval.end\n if self.boundary_table[begin] == 1:\n del self.boundary_table[begin]\n else:\n self.boundary_table[begin] -= 1\n\n if self.boundary_table[end] == 1:\n del self.boundary_table[end]\n else:\n self.boundary_table[end] -= 1\n\n def add(self, interval):\n \"\"\"\n Adds an interval to the tree, if not already present.\n\n Completes in O(log n) time.\n \"\"\"\n if interval in self:\n return\n\n if interval.is_null():\n raise ValueError(\n \"IntervalTree: Null Interval objects not allowed in IntervalTree:\"\n \" {0}\".format(interval)\n )\n\n if not self.top_node:\n self.top_node = Node.from_interval(interval)\n else:\n self.top_node = self.top_node.add(interval)\n self.all_intervals.add(interval)\n self._add_boundaries(interval)\n append = add\n\n def addi(self, begin, end, data=None):\n \"\"\"\n Shortcut for add(Interval(begin, end, data)).\n\n Completes in O(log n) time.\n \"\"\"\n return self.add(Interval(begin, end, data))\n appendi = addi\n\n def update(self, intervals):\n \"\"\"\n Given an iterable of intervals, add them to the tree.\n\n Completes in O(m*log(n+m), where m = number of intervals to\n add.\n \"\"\"\n for iv in intervals:\n self.add(iv)\n\n def remove(self, interval):\n \"\"\"\n Removes an interval from the tree, if present. If not, raises\n ValueError.\n\n Completes in O(log n) time.\n \"\"\"\n #self.verify()\n if interval not in self:\n #print(self.all_intervals)\n raise ValueError\n self.top_node = self.top_node.remove(interval)\n self.all_intervals.remove(interval)\n self._remove_boundaries(interval)\n #self.verify()\n\n def removei(self, begin, end, data=None):\n \"\"\"\n Shortcut for remove(Interval(begin, end, data)).\n\n Completes in O(log n) time.\n \"\"\"\n return self.remove(Interval(begin, end, data))\n\n def discard(self, interval):\n \"\"\"\n Removes an interval from the tree, if present. 
If not, does\n nothing.\n\n Completes in O(log n) time.\n \"\"\"\n if interval not in self:\n return\n self.all_intervals.discard(interval)\n self.top_node = self.top_node.discard(interval)\n self._remove_boundaries(interval)\n\n def discardi(self, begin, end, data=None):\n \"\"\"\n Shortcut for discard(Interval(begin, end, data)).\n\n Completes in O(log n) time.\n \"\"\"\n return self.discard(Interval(begin, end, data))\n\n def difference(self, other):\n \"\"\"\n Returns a new tree, comprising all intervals in self but not\n in other.\n \"\"\"\n ivs = set()\n for iv in self:\n if iv not in other:\n ivs.add(iv)\n return IntervalTree(ivs)\n\n def difference_update(self, other):\n \"\"\"\n Removes all intervals in other from self.\n \"\"\"\n for iv in other:\n self.discard(iv)\n\n def union(self, other):\n \"\"\"\n Returns a new tree, comprising all intervals from self\n and other.\n \"\"\"\n return IntervalTree(set(self).union(other))\n\n def intersection(self, other):\n \"\"\"\n Returns a new tree of all intervals common to both self and\n other.\n \"\"\"\n ivs = set()\n shorter, longer = sorted([self, other], key=len)\n for iv in shorter:\n if iv in longer:\n ivs.add(iv)\n return IntervalTree(ivs)\n\n def intersection_update(self, other):\n \"\"\"\n Removes intervals from self unless they also exist in other.\n \"\"\"\n ivs = list(self)\n for iv in ivs:\n if iv not in other:\n self.remove(iv)\n\n def symmetric_difference(self, other):\n \"\"\"\n Return a tree with elements only in self or other but not\n both.\n \"\"\"\n if not isinstance(other, set): other = set(other)\n me = set(self)\n ivs = me.difference(other).union(other.difference(me))\n return IntervalTree(ivs)\n\n def symmetric_difference_update(self, other):\n \"\"\"\n Throws out all intervals except those only in self or other,\n not both.\n \"\"\"\n other = set(other)\n ivs = list(self)\n for iv in ivs:\n if iv in other:\n self.remove(iv)\n other.remove(iv)\n self.update(other)\n\n def remove_overlap(self, begin, end=None):\n \"\"\"\n Removes all intervals overlapping the given point or range.\n\n Completes in O((r+m)*log n) time, where:\n * n = size of the tree\n * m = number of matches\n * r = size of the search range (this is 1 for a point)\n \"\"\"\n hitlist = self.at(begin) if end is None else self.overlap(begin, end)\n for iv in hitlist:\n self.remove(iv)\n\n def remove_envelop(self, begin, end):\n \"\"\"\n Removes all intervals completely enveloped in the given range.\n\n Completes in O((r+m)*log n) time, where:\n * n = size of the tree\n * m = number of matches\n * r = size of the search range\n \"\"\"\n hitlist = self.envelop(begin, end)\n for iv in hitlist:\n self.remove(iv)\n\n def chop(self, begin, end, datafunc=None):\n \"\"\"\n Like remove_envelop(), but trims back Intervals hanging into\n the chopped area so that nothing overlaps.\n \"\"\"\n insertions = set()\n begin_hits = [iv for iv in self.at(begin) if iv.begin < begin]\n end_hits = [iv for iv in self.at(end) if iv.end > end]\n\n if datafunc:\n for iv in begin_hits:\n insertions.add(Interval(iv.begin, begin, datafunc(iv, True)))\n for iv in end_hits:\n insertions.add(Interval(end, iv.end, datafunc(iv, False)))\n else:\n for iv in begin_hits:\n insertions.add(Interval(iv.begin, begin, iv.data))\n for iv in end_hits:\n insertions.add(Interval(end, iv.end, iv.data))\n\n self.remove_envelop(begin, end)\n self.difference_update(begin_hits)\n self.difference_update(end_hits)\n self.update(insertions)\n\n def slice(self, point, datafunc=None):\n \"\"\"\n Split 
Intervals that overlap point into two new Intervals. if\n specified, uses datafunc(interval, islower=True/False) to\n set the data field of the new Intervals.\n :param point: where to slice\n :param datafunc(interval, isupper): callable returning a new\n value for the interval's data field\n \"\"\"\n hitlist = set(iv for iv in self.at(point) if iv.begin < point)\n insertions = set()\n if datafunc:\n for iv in hitlist:\n insertions.add(Interval(iv.begin, point, datafunc(iv, True)))\n insertions.add(Interval(point, iv.end, datafunc(iv, False)))\n else:\n for iv in hitlist:\n insertions.add(Interval(iv.begin, point, iv.data))\n insertions.add(Interval(point, iv.end, iv.data))\n self.difference_update(hitlist)\n self.update(insertions)\n\n def clear(self):\n \"\"\"\n Empties the tree.\n\n Completes in O(1) tine.\n \"\"\"\n self.__init__()\n\n def find_nested(self):\n \"\"\"\n Returns a dictionary mapping parent intervals to sets of\n intervals overlapped by and contained in the parent.\n\n Completes in O(n^2) time.\n :rtype: dict of [Interval, set of Interval]\n \"\"\"\n result = {}\n\n def add_if_nested():\n if parent.contains_interval(child):\n if parent not in result:\n result[parent] = set()\n result[parent].add(child)\n\n long_ivs = sorted(self.all_intervals, key=Interval.length, reverse=True)\n for i, parent in enumerate(long_ivs):\n for child in long_ivs[i + 1:]:\n add_if_nested()\n return result\n\n def overlaps(self, begin, end=None):\n \"\"\"\n Returns whether some interval in the tree overlaps the given\n point or range.\n\n Completes in O(r*log n) time, where r is the size of the\n search range.\n :rtype: bool\n \"\"\"\n if end is not None:\n return self.overlaps_range(begin, end)\n elif isinstance(begin, Number):\n return self.overlaps_point(begin)\n else:\n return self.overlaps_range(begin.begin, begin.end)\n\n def overlaps_point(self, p):\n \"\"\"\n Returns whether some interval in the tree overlaps p.\n\n Completes in O(log n) time.\n :rtype: bool\n \"\"\"\n if self.is_empty():\n return False\n return bool(self.top_node.contains_point(p))\n\n def overlaps_range(self, begin, end):\n \"\"\"\n Returns whether some interval in the tree overlaps the given\n range. Returns False if given a null interval over which to\n test.\n\n Completes in O(r*log n) time, where r is the range length and n\n is the table size.\n :rtype: bool\n \"\"\"\n if self.is_empty():\n return False\n elif begin >= end:\n return False\n elif self.overlaps_point(begin):\n return True\n return any(\n self.overlaps_point(bound)\n for bound in self.boundary_table\n if begin < bound < end\n )\n\n def split_overlaps(self):\n \"\"\"\n Finds all intervals with overlapping ranges and splits them\n along the range boundaries.\n\n Completes in worst-case O(n^2*log n) time (many interval\n boundaries are inside many intervals), best-case O(n*log n)\n time (small number of overlaps << n per interval).\n \"\"\"\n if not self:\n return\n if len(self.boundary_table) == 2:\n return\n\n bounds = sorted(self.boundary_table) # get bound locations\n\n new_ivs = set()\n for lbound, ubound in zip(bounds[:-1], bounds[1:]):\n for iv in self[lbound]:\n new_ivs.add(Interval(lbound, ubound, iv.data))\n\n self.__init__(new_ivs)\n\n def merge_overlaps(self, data_reducer=None, data_initializer=None, strict=True):\n \"\"\"\n Finds all intervals with overlapping ranges and merges them\n into a single interval. 
If provided, uses data_reducer and\n data_initializer with similar semantics to Python's built-in\n reduce(reducer_func[, initializer]), as follows:\n\n If data_reducer is set to a function, combines the data\n fields of the Intervals with\n current_reduced_data = data_reducer(current_reduced_data, new_data)\n If data_reducer is None, the merged Interval's data\n field will be set to None, ignoring all the data fields\n of the merged Intervals.\n\n On encountering the first Interval to merge, if\n data_initializer is None (default), uses the first\n Interval's data field as the first value for\n current_reduced_data. If data_initializer is not None,\n current_reduced_data is set to a shallow copy of\n data_initializer created with copy.copy(data_initializer).\n\n If strict is True (default), intervals are only merged if\n their ranges actually overlap; adjacent, touching intervals\n will not be merged. If strict is False, intervals are merged\n even if they are only end-to-end adjacent.\n\n Completes in O(n*logn).\n \"\"\"\n if not self:\n return\n\n sorted_intervals = sorted(self.all_intervals) # get sorted intervals\n merged = []\n # use mutable object to allow new_series() to modify it\n current_reduced = [None]\n higher = None # iterating variable, which new_series() needs access to\n\n def new_series():\n if data_initializer is None:\n current_reduced[0] = higher.data\n merged.append(higher)\n return\n else: # data_initializer is not None\n current_reduced[0] = copy(data_initializer)\n current_reduced[0] = data_reducer(current_reduced[0], higher.data)\n merged.append(Interval(higher.begin, higher.end, current_reduced[0]))\n\n for higher in sorted_intervals:\n if merged: # series already begun\n lower = merged[-1]\n if (higher.begin < lower.end or\n not strict and higher.begin == lower.end): # should merge\n upper_bound = max(lower.end, higher.end)\n if data_reducer is not None:\n current_reduced[0] = data_reducer(current_reduced[0], higher.data)\n else: # annihilate the data, since we don't know how to merge it\n current_reduced[0] = None\n merged[-1] = Interval(lower.begin, upper_bound, current_reduced[0])\n else:\n new_series()\n else: # not merged; is first of Intervals to merge\n new_series()\n\n self.__init__(merged)\n\n def merge_equals(self, data_reducer=None, data_initializer=None):\n \"\"\"\n Finds all intervals with equal ranges and merges them\n into a single interval. If provided, uses data_reducer and\n data_initializer with similar semantics to Python's built-in\n reduce(reducer_func[, initializer]), as follows:\n\n If data_reducer is set to a function, combines the data\n fields of the Intervals with\n current_reduced_data = data_reducer(current_reduced_data, new_data)\n If data_reducer is None, the merged Interval's data\n field will be set to None, ignoring all the data fields\n of the merged Intervals.\n\n On encountering the first Interval to merge, if\n data_initializer is None (default), uses the first\n Interval's data field as the first value for\n current_reduced_data. 
If data_initializer is not None,\n current_reduced_data is set to a shallow copy of\n data_initiazer created with\n copy.copy(data_initializer).\n\n Completes in O(n*logn).\n \"\"\"\n if not self:\n return\n\n sorted_intervals = sorted(self.all_intervals) # get sorted intervals\n merged = []\n # use mutable object to allow new_series() to modify it\n current_reduced = [None]\n higher = None # iterating variable, which new_series() needs access to\n\n def new_series():\n if data_initializer is None:\n current_reduced[0] = higher.data\n merged.append(higher)\n return\n else: # data_initializer is not None\n current_reduced[0] = copy(data_initializer)\n current_reduced[0] = data_reducer(current_reduced[0], higher.data)\n merged.append(Interval(higher.begin, higher.end, current_reduced[0]))\n\n for higher in sorted_intervals:\n if merged: # series already begun\n lower = merged[-1]\n if higher.range_matches(lower): # should merge\n upper_bound = max(lower.end, higher.end)\n if data_reducer is not None:\n current_reduced[0] = data_reducer(current_reduced[0], higher.data)\n else: # annihilate the data, since we don't know how to merge it\n current_reduced[0] = None\n merged[-1] = Interval(lower.begin, upper_bound, current_reduced[0])\n else:\n new_series()\n else: # not merged; is first of Intervals to merge\n new_series()\n\n self.__init__(merged)\n\n def items(self):\n \"\"\"\n Constructs and returns a set of all intervals in the tree.\n\n Completes in O(n) time.\n :rtype: set of Interval\n \"\"\"\n return set(self.all_intervals)\n\n def is_empty(self):\n \"\"\"\n Returns whether the tree is empty.\n\n Completes in O(1) time.\n :rtype: bool\n \"\"\"\n return 0 == len(self)\n\n def at(self, p):\n \"\"\"\n Returns the set of all intervals that contain p.\n\n Completes in O(m + log n) time, where:\n * n = size of the tree\n * m = number of matches\n :rtype: set of Interval\n \"\"\"\n root = self.top_node\n if not root:\n return set()\n return root.search_point(p, set())\n\n def envelop(self, begin, end=None):\n \"\"\"\n Returns the set of all intervals fully contained in the range\n [begin, end).\n\n Completes in O(m + k*log n) time, where:\n * n = size of the tree\n * m = number of matches\n * k = size of the search range\n :rtype: set of Interval\n \"\"\"\n root = self.top_node\n if not root:\n return set()\n if end is None:\n iv = begin\n return self.envelop(iv.begin, iv.end)\n elif begin >= end:\n return set()\n result = root.search_point(begin, set()) # bound_begin might be greater\n boundary_table = self.boundary_table\n bound_begin = boundary_table.bisect_left(begin)\n bound_end = boundary_table.bisect_left(end) # up to, but not including end\n result.update(root.search_overlap(\n # slice notation is slightly slower\n boundary_table.keys()[index] for index in xrange(bound_begin, bound_end)\n ))\n\n # TODO: improve envelop() to use node info instead of less-efficient filtering\n result = set(\n iv for iv in result\n if iv.begin >= begin and iv.end <= end\n )\n return result\n\n def overlap(self, begin, end=None):\n \"\"\"\n Returns a set of all intervals overlapping the given range.\n\n Completes in O(m + k*log n) time, where:\n * n = size of the tree\n * m = number of matches\n * k = size of the search range\n :rtype: set of Interval\n \"\"\"\n root = self.top_node\n if not root:\n return set()\n if end is None:\n iv = begin\n return self.overlap(iv.begin, iv.end)\n elif begin >= end:\n return set()\n result = root.search_point(begin, set()) # bound_begin might be greater\n boundary_table 
= self.boundary_table\n bound_begin = boundary_table.bisect_left(begin)\n bound_end = boundary_table.bisect_left(end) # up to, but not including end\n result.update(root.search_overlap(\n # slice notation is slightly slower\n boundary_table.keys()[index] for index in xrange(bound_begin, bound_end)\n ))\n return result\n\n def begin(self):\n \"\"\"\n Returns the lower bound of the first interval in the tree.\n\n Completes in O(1) time.\n \"\"\"\n if not self.boundary_table:\n return 0\n return self.boundary_table.keys()[0]\n\n def end(self):\n \"\"\"\n Returns the upper bound of the last interval in the tree.\n\n Completes in O(1) time.\n \"\"\"\n if not self.boundary_table:\n return 0\n return self.boundary_table.keys()[-1]\n\n def range(self):\n \"\"\"\n Returns a minimum-spanning Interval that encloses all the\n members of this IntervalTree. If the tree is empty, returns\n null Interval.\n :rtype: Interval\n \"\"\"\n return Interval(self.begin(), self.end())\n\n def span(self):\n \"\"\"\n Returns the length of the minimum-spanning Interval that\n encloses all the members of this IntervalTree. If the tree\n is empty, return 0.\n \"\"\"\n if not self:\n return 0\n return self.end() - self.begin()\n\n def print_structure(self, tostring=False):\n \"\"\"\n ## FOR DEBUGGING ONLY ##\n Pretty-prints the structure of the tree.\n If tostring is true, prints nothing and returns a string.\n :rtype: None or str\n \"\"\"\n if self.top_node:\n return self.top_node.print_structure(tostring=tostring)\n else:\n result = \"<empty IntervalTree>\"\n if not tostring:\n print(result)\n else:\n return result\n\n def verify(self):\n \"\"\"\n ## FOR DEBUGGING ONLY ##\n Checks the table to ensure that the invariants are held.\n \"\"\"\n if self.all_intervals:\n ## top_node.all_children() == self.all_intervals\n try:\n assert self.top_node.all_children() == self.all_intervals\n except AssertionError as e:\n print(\n 'Error: the tree and the membership set are out of sync!'\n )\n tivs = set(self.top_node.all_children())\n print('top_node.all_children() - all_intervals:')\n try:\n pprint\n except NameError:\n from pprint import pprint\n pprint(tivs - self.all_intervals)\n print('all_intervals - top_node.all_children():')\n pprint(self.all_intervals - tivs)\n raise e\n\n ## All members are Intervals\n for iv in self:\n assert isinstance(iv, Interval), (\n \"Error: Only Interval objects allowed in IntervalTree:\"\n \" {0}\".format(iv)\n )\n\n ## No null intervals\n for iv in self:\n assert not iv.is_null(), (\n \"Error: Null Interval objects not allowed in IntervalTree:\"\n \" {0}\".format(iv)\n )\n\n ## Reconstruct boundary_table\n bound_check = {}\n for iv in self:\n if iv.begin in bound_check:\n bound_check[iv.begin] += 1\n else:\n bound_check[iv.begin] = 1\n if iv.end in bound_check:\n bound_check[iv.end] += 1\n else:\n bound_check[iv.end] = 1\n\n ## Reconstructed boundary table (bound_check) ==? 
boundary_table\n assert set(self.boundary_table.keys()) == set(bound_check.keys()),\\\n 'Error: boundary_table is out of sync with ' \\\n 'the intervals in the tree!'\n\n # For efficiency reasons this should be iteritems in Py2, but we\n # don't care much for efficiency in debug methods anyway.\n for key, val in self.boundary_table.items():\n assert bound_check[key] == val, \\\n 'Error: boundary_table[{0}] should be {1},' \\\n ' but is {2}!'.format(\n key, bound_check[key], val)\n\n ## Internal tree structure\n self.top_node.verify(set())\n else:\n ## Verify empty tree\n assert not self.boundary_table, \\\n \"Error: boundary table should be empty!\"\n assert self.top_node is None, \\\n \"Error: top_node isn't None!\"\n\n def score(self, full_report=False):\n \"\"\"\n Returns a number between 0 and 1, indicating how suboptimal the tree\n is. The lower, the better. Roughly, this number represents the\n fraction of flawed Intervals in the tree.\n :rtype: float\n \"\"\"\n if len(self) <= 2:\n return 0.0\n\n n = len(self)\n m = self.top_node.count_nodes()\n\n def s_center_score():\n \"\"\"\n Returns a normalized score, indicating roughly how many times\n intervals share s_center with other intervals. Output is full-scale\n from 0 to 1.\n :rtype: float\n \"\"\"\n raw = n - m\n maximum = n - 1\n return raw / float(maximum)\n\n report = {\n \"depth\": self.top_node.depth_score(n, m),\n \"s_center\": s_center_score(),\n }\n cumulative = max(report.values())\n report[\"_cumulative\"] = cumulative\n if full_report:\n return report\n return cumulative\n\n\n def __getitem__(self, index):\n \"\"\"\n Returns a set of all intervals overlapping the given index or\n slice.\n\n Completes in O(k * log(n) + m) time, where:\n * n = size of the tree\n * m = number of matches\n * k = size of the search range (this is 1 for a point)\n :rtype: set of Interval\n \"\"\"\n try:\n start, stop = index.start, index.stop\n if start is None:\n start = self.begin()\n if stop is None:\n return set(self)\n if stop is None:\n stop = self.end()\n return self.overlap(start, stop)\n except AttributeError:\n return self.at(index)\n\n def __setitem__(self, index, value):\n \"\"\"\n Adds a new interval to the tree. 
A shortcut for\n add(Interval(index.start, index.stop, value)).\n\n If an identical Interval object with equal range and data\n already exists, does nothing.\n\n Completes in O(log n) time.\n \"\"\"\n self.addi(index.start, index.stop, value)\n\n def __delitem__(self, point):\n \"\"\"\n Delete all items overlapping point.\n \"\"\"\n self.remove_overlap(point)\n\n def __contains__(self, item):\n \"\"\"\n Returns whether item exists as an Interval in the tree.\n This method only returns True for exact matches; for\n overlaps, see the overlaps() method.\n\n Completes in O(1) time.\n :rtype: bool\n \"\"\"\n # Removed point-checking code; it might trick the user into\n # thinking that this is O(1), which point-checking isn't.\n #if isinstance(item, Interval):\n return item in self.all_intervals\n #else:\n # return self.contains_point(item)\n\n def containsi(self, begin, end, data=None):\n \"\"\"\n Shortcut for (Interval(begin, end, data) in tree).\n\n Completes in O(1) time.\n :rtype: bool\n \"\"\"\n return Interval(begin, end, data) in self\n\n def __iter__(self):\n \"\"\"\n Returns an iterator over all the intervals in the tree.\n\n Completes in O(1) time.\n :rtype: collections.Iterable[Interval]\n \"\"\"\n return self.all_intervals.__iter__()\n iter = __iter__\n\n def __len__(self):\n \"\"\"\n Returns how many intervals are in the tree.\n\n Completes in O(1) time.\n :rtype: int\n \"\"\"\n return len(self.all_intervals)\n\n def __eq__(self, other):\n \"\"\"\n Whether two IntervalTrees are equal.\n\n Completes in O(n) time if sizes are equal; O(1) time otherwise.\n :rtype: bool\n \"\"\"\n return (\n isinstance(other, IntervalTree) and\n self.all_intervals == other.all_intervals\n )\n\n def __repr__(self):\n \"\"\"\n :rtype: str\n \"\"\"\n ivs = sorted(self)\n if not ivs:\n return \"IntervalTree()\"\n else:\n return \"IntervalTree({0})\".format(ivs)\n\n __str__ = __repr__\n\n def __reduce__(self):\n \"\"\"\n For pickle-ing.\n :rtype: tuple\n \"\"\"\n return IntervalTree, (sorted(self.all_intervals),)" } ]
import sys import os import shlex import ctypes import re import subprocess import shared.param as param from subprocess import PIPE from argparse import ArgumentParser, SUPPRESS from collections import defaultdict from shared.utils import subprocess_popen, reference_sequence_from, IUPAC_base_to_ACGT_base_dict as BASE2ACGT, log_error from shared.interval_tree import bed_tree_from from shared.intervaltree.intervaltree import IntervalTree
13,875
chunk_start += CHUNK_SIZE chunk_end += CHUNK_SIZE read = Read(read_start=POS, seq=SEQ, cigar=CIGAR, mapping_quality=MAPQ, base_quality=QUAL, strand=STRAND, raw_base_quality=raw_base_quality, read_name=read_name, flag=FLAG, PNEXT=PNEXT, RNEXT=RNEXT, TLEN=TLEN, phasing=HP_TAG) if CIGAR == "*" or is_too_many_soft_clipped_bases_for_a_read_from(CIGAR): continue aligned_reads[read_name] = read if MAPQ < min_dbg_mapping_quality: continue advance = 0 for c in str(CIGAR): if c.isdigit(): advance = advance * 10 + int(c) continue if c == '=': reference_position += advance query_position += advance elif c == "M" or c == 'X': for _ in range(advance): if QUAL[query_position] >= min_dbg_base_quality: reference_base = reference_sequence[reference_position - reference_start_0_based] # 0 base query_base = SEQ[query_position] if reference_base in 'ACGT' and query_base != reference_base: pileup[reference_position]['X'] += 1 reference_position += 1 query_position += 1 elif c == "I" or c == 'S': pre_base = reference_sequence[reference_position - reference_start_0_based - 1] ins_base_quality = QUAL[query_position: query_position + advance] out_of_region = reference_position < chunk_start - region_expansion_in_bp or reference_position > chunk_end + region_expansion_in_bp if not out_of_region and pre_base in 'ACGT' and ( sum([True for bq in ins_base_quality if bq < min_dbg_base_quality]) == 0): # skip the bad seq start = reference_position - advance end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 1 # insertion consumes query query_position += advance elif c == "D": out_of_region = reference_position < chunk_start - region_expansion_in_bp or reference_position > chunk_end + region_expansion_in_bp pre_base = reference_sequence[reference_position - reference_start_0_based - 1] # 0-base if not out_of_region and pre_base in 'ACGT': start = reference_position end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 1 # deletion consumes reference reference_position += advance # reset advance advance = 0 yield chunk_start, chunk_end yield None, None def reads_realignment(args): POS = args.pos args.ctg_start = POS - args.realign_flanking_window args.ctg_end = POS + args.realign_flanking_window bed_file_path = args.bed_fn extend_bed = args.extend_bed fasta_file_path = args.ref_fn ctg_name = args.ctg_name ctg_start = args.ctg_start ctg_end = args.ctg_end samtools_execute_command = args.samtools bam_file_path = args.bam_fn min_mq = args.min_mq min_coverage = args.min_coverage is_bed_file_given = bed_file_path is not None is_ctg_name_given = ctg_name is not None read_fn = args.read_fn global test_pos test_pos = None is_ctg_range_given = is_ctg_name_given and ctg_start is not None and ctg_end is not None ref_regions = [] reads_regions = [] reference_start, reference_end = None, None if is_ctg_range_given: extend_start = ctg_start - max_window_size extend_end = ctg_end + max_window_size reads_regions.append(region_from(ctg_name=ctg_name, ctg_start=extend_start, ctg_end=extend_end)) reference_start, reference_end = ctg_start - expandReferenceRegion, ctg_end + expandReferenceRegion reference_start = 1 if reference_start < 1 else reference_start ref_regions.append(region_from(ctg_name=ctg_name, ctg_start=reference_start, ctg_end=reference_end)) elif is_ctg_name_given: reads_regions.append(region_from(ctg_name=ctg_name)) ref_regions.append(region_from(ctg_name=ctg_name)) reference_start = 1 reference_sequence = reference_sequence_from( 
samtools_execute_command=samtools_execute_command, fasta_file_path=fasta_file_path, regions=ref_regions ) if reference_sequence is None or len(reference_sequence) == 0: sys.exit("[ERROR] Failed to load reference sequence from file ({}).".format(fasta_file_path))
# BSD 3-Clause License # # Copyright 2023 The University of Hong Kong, Department of Computer Science # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. realign_chunk_size = 5000 min_dbg_mapping_quality = min_dbg_base_quality = 20 region_expansion_in_bp = expand_align_ref_region = 20 min_windows_distance = expand_align_ref_region * 4 max_window_size = max_region_reads_num = 1000 expandReferenceRegion = 100000 realigner_mod = os.path.join(*(os.path.split(__file__)[:-1] + ('realign/realigner',))) dbg_mod = os.path.join(*(os.path.split(__file__)[:-1] + ('realign/debruijn_graph',))) if not os.path.exists(realigner_mod) or not os.path.exists(dbg_mod): # try to find modules in clair3 python_path = subprocess.run('which python', stdout=subprocess.PIPE, shell=True).stdout.decode().rstrip() conda_prefix = os.path.dirname(os.path.dirname(python_path)) clair3_realign_path = os.path.join(conda_prefix, 'bin', 'preprocess', 'realign') clair3_realigner_mod = os.path.join(clair3_realign_path, 'realigner') clair3_dbg_mod = os.path.join(clair3_realign_path, 'debruijn_graph') if os.path.exists(clair3_realigner_mod) and os.path.exists(clair3_dbg_mod): realigner_mod = clair3_realigner_mod dbg_mod = clair3_dbg_mod else: print(log_error("[ERROR] `realigner` or `debruijn_graph` submodule not found in conda environment, pls install clair3-illumina package!")) sys.exit(1) realigner = ctypes.cdll.LoadLibrary(realigner_mod) dbg = ctypes.cdll.LoadLibrary(dbg_mod) class StructPointer(ctypes.Structure): _fields_ = [("position", ctypes.c_int * max_region_reads_num), ("cigar_string", ctypes.c_char_p * max_region_reads_num), ] class DBGPointer(ctypes.Structure): _fields_ = [("consensus_size", ctypes.c_int), ("consensus", ctypes.c_char_p * 200), ] # Read class for storing read information cigar_indel_re = r"(\d+)(D)" cigarRe = r"(\d+)([MIDNSHP=X])" graph_min_mapping_quality = 14 def get_len(seq, cigar): if 'D' not in cigar: return len(seq) indel_length = 0 for m in re.finditer(cigar_indel_re, cigar): indel_length += int(m.group(1)) return len(seq) + indel_length def print_ed(s1, s2): match_str = "" for x, 
y in zip(s1, s2): if x == y: match_str += "|" else: match_str += " " print(s1) print(match_str) print(s2) class Read(object): def __init__(self, read_start, seq, cigar, mapping_quality, base_quality, strand, raw_base_quality=None, unalign=False, read_name=None, read_id=None, flag=None, RNEXT=0, PNEXT=0, TLEN=0, phasing=None): self.read_start = read_start self.cigar = cigar self.mapping_quality = mapping_quality self.seq = seq self.base_quality = base_quality self.read_id = read_id self.read_end = self.read_start + get_len(seq, cigar) self.strand = strand self.graph_mq = True if self.mapping_quality >= graph_min_mapping_quality else False self.raw_base_quality = raw_base_quality self.read_name = read_name self.region = {} self.region_cigar = None self.region_start = None self.flag = str(flag) self.RNEXT = RNEXT self.PNEXT = PNEXT self.TLEN = PNEXT self.test_pos = None self.best_cigar = cigar self.best_pos = read_start self.best_align_score = None self.phasing = phasing def set_realign_flag(self): self.unalign = True def count_align_score(self, cigar): score = 0 for m in re.finditer(cigarRe, cigar): l, op, = int(m.group(1)), m.group(2) if op in 'MX=S': continue elif op in 'ID': score += l return score def set_realignment_info(self, region_start, realignment_cigar, realignment_start): realignment_cigar = realignment_cigar.replace('X', 'M') if realignment_cigar == self.cigar and realignment_start == self.read_start: return if self.best_align_score and realignment_cigar == self.best_cigar and realignment_start == self.best_pos: return realignment_align_score = self.count_align_score(realignment_cigar) if not self.best_align_score or realignment_align_score >= self.best_align_score: self.best_cigar = realignment_cigar self.best_pos = realignment_start self.best_align_score = realignment_align_score def decode_region(self, region_str): if region_str == '-' or '-' not in region_str: return region_str = region_str.rstrip().split('_') for region in region_str: region, cigar, pos = region.split('-') region, pos = int(region), int(pos) self.region[region] = [cigar, pos] def byte(x): return bytes(x, encoding="utf8") def find_max_overlap_index(query_region, search_regions): def overlap_length(region1, region2): return max(0, (min(region1[1], region2[1]) - max(region1[0], region2[0]))) overlap_lengths = [overlap_length(query_region, search_region) for search_region in search_regions] argmax = max(range(len(search_regions)), key=lambda idx: overlap_lengths[idx]) return None if overlap_lengths[argmax] == 0 else argmax def get_reference_seq(sequence, start, end, reference_start_0_based): if end < start: end, start = start, end return sequence[start - reference_start_0_based: end - reference_start_0_based] def phredscore2raw_score(qual): return ord(qual) - 33 def evc_base_from(base): return base if base == "N" else BASE2ACGT[base] def region_from(ctg_name, ctg_start=None, ctg_end=None): """ 1-based region string [start, end] """ if ctg_name is None: return "" if (ctg_start is None) != (ctg_end is None): return "" if ctg_start is None and ctg_end is None: return "{}".format(ctg_name) return "{}:{}-{}".format(ctg_name, ctg_start, ctg_end) class TensorStdout(object): def __init__(self, handle): self.stdin = handle def __del__(self): self.stdin.close() def get_halpotype_tag(samtools_view_columns): found_hp_tag = False tag = [c for c in samtools_view_columns if 'HP:i:' in c] if not len(tag) or len(tag[0]) < 6 or not tag[0][5].isdigit(): return None return tag[0][5] def 
is_too_many_soft_clipped_bases_for_a_read_from(CIGAR): soft_clipped_bases = 0 total_alignment_positions = 0 advance = 0 for c in str(CIGAR): if c.isdigit(): advance = advance * 10 + int(c) continue if c == "S": soft_clipped_bases += advance total_alignment_positions += advance advance = 0 # skip a read less than 55% aligned return 1.0 - float(soft_clipped_bases) / (total_alignment_positions + 1) < 0.55 def samtools_view_generator_from(samtools_view_process, aligned_reads, pileup, ctg_name, reference_sequence, reference_start_0_based, header, center_pos=None): CHUNK_SIZE = realign_chunk_size chunk_start, chunk_end = None, None for row_id, row in enumerate(samtools_view_process.stdout): if row[0] == '@': header.append(row) continue columns = row.strip().split() RNAME = columns[2] if RNAME != ctg_name: continue read_name = columns[0] FLAG = int(columns[1]) POS = int(columns[3]) - 1 # switch from 1-base to 0-base to match sequence index MAPQ = int(columns[4]) CIGAR = columns[5] SEQ = columns[9].upper() # uppercase for SEQ (regexp is \*|[A-Za-z=.]+) RNEXT = columns[6] PNEXT = columns[7] TLEN = columns[8] reference_position = POS query_position = 0 raw_base_quality = columns[10] QUAL = [phredscore2raw_score(item) for item in raw_base_quality] STRAND = (16 == (FLAG & 16)) HP_TAG = get_halpotype_tag(columns[11:]) read_name += "_" + str(int(STRAND)) # distinguish two strand if chunk_start is None: chunk_start = POS chunk_end = chunk_start + CHUNK_SIZE if POS >= chunk_end + region_expansion_in_bp: yield chunk_start, chunk_end chunk_start += CHUNK_SIZE chunk_end += CHUNK_SIZE read = Read(read_start=POS, seq=SEQ, cigar=CIGAR, mapping_quality=MAPQ, base_quality=QUAL, strand=STRAND, raw_base_quality=raw_base_quality, read_name=read_name, flag=FLAG, PNEXT=PNEXT, RNEXT=RNEXT, TLEN=TLEN, phasing=HP_TAG) if CIGAR == "*" or is_too_many_soft_clipped_bases_for_a_read_from(CIGAR): continue aligned_reads[read_name] = read if MAPQ < min_dbg_mapping_quality: continue advance = 0 for c in str(CIGAR): if c.isdigit(): advance = advance * 10 + int(c) continue if c == '=': reference_position += advance query_position += advance elif c == "M" or c == 'X': for _ in range(advance): if QUAL[query_position] >= min_dbg_base_quality: reference_base = reference_sequence[reference_position - reference_start_0_based] # 0 base query_base = SEQ[query_position] if reference_base in 'ACGT' and query_base != reference_base: pileup[reference_position]['X'] += 1 reference_position += 1 query_position += 1 elif c == "I" or c == 'S': pre_base = reference_sequence[reference_position - reference_start_0_based - 1] ins_base_quality = QUAL[query_position: query_position + advance] out_of_region = reference_position < chunk_start - region_expansion_in_bp or reference_position > chunk_end + region_expansion_in_bp if not out_of_region and pre_base in 'ACGT' and ( sum([True for bq in ins_base_quality if bq < min_dbg_base_quality]) == 0): # skip the bad seq start = reference_position - advance end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 1 # insertion consumes query query_position += advance elif c == "D": out_of_region = reference_position < chunk_start - region_expansion_in_bp or reference_position > chunk_end + region_expansion_in_bp pre_base = reference_sequence[reference_position - reference_start_0_based - 1] # 0-base if not out_of_region and pre_base in 'ACGT': start = reference_position end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 1 # deletion 
consumes reference reference_position += advance # reset advance advance = 0 yield chunk_start, chunk_end yield None, None def reads_realignment(args): POS = args.pos args.ctg_start = POS - args.realign_flanking_window args.ctg_end = POS + args.realign_flanking_window bed_file_path = args.bed_fn extend_bed = args.extend_bed fasta_file_path = args.ref_fn ctg_name = args.ctg_name ctg_start = args.ctg_start ctg_end = args.ctg_end samtools_execute_command = args.samtools bam_file_path = args.bam_fn min_mq = args.min_mq min_coverage = args.min_coverage is_bed_file_given = bed_file_path is not None is_ctg_name_given = ctg_name is not None read_fn = args.read_fn global test_pos test_pos = None is_ctg_range_given = is_ctg_name_given and ctg_start is not None and ctg_end is not None ref_regions = [] reads_regions = [] reference_start, reference_end = None, None if is_ctg_range_given: extend_start = ctg_start - max_window_size extend_end = ctg_end + max_window_size reads_regions.append(region_from(ctg_name=ctg_name, ctg_start=extend_start, ctg_end=extend_end)) reference_start, reference_end = ctg_start - expandReferenceRegion, ctg_end + expandReferenceRegion reference_start = 1 if reference_start < 1 else reference_start ref_regions.append(region_from(ctg_name=ctg_name, ctg_start=reference_start, ctg_end=reference_end)) elif is_ctg_name_given: reads_regions.append(region_from(ctg_name=ctg_name)) ref_regions.append(region_from(ctg_name=ctg_name)) reference_start = 1 reference_sequence = reference_sequence_from( samtools_execute_command=samtools_execute_command, fasta_file_path=fasta_file_path, regions=ref_regions ) if reference_sequence is None or len(reference_sequence) == 0: sys.exit("[ERROR] Failed to load reference sequence from file ({}).".format(fasta_file_path))
tree = bed_tree_from(bed_file_path=bed_file_path)
1
2023-11-07 04:39:16+00:00
16k
the-siesta-group/edfio
tests/test_faq.py
[ { "identifier": "Edf", "path": "edfio/edf.py", "snippet": "class Edf:\n \"\"\"Python representation of an EDF file.\n\n EDF header fields are exposed as properties with appropriate data types (i.e.,\n string, numeric, date, or time objects). Fields that might break the file on\n modification (i.e., `version`, `bytes_in_header_record`, `reserved`,\n `num_data_records`, `data_record_duration`, and `num_signals`) can not be set after\n instantiation.\n\n Note that the startdate has to be set via the parameter `recording`.\n\n For writing an EDF file with a non-integer seconds duration, currently an\n appropriate value for `data_record_duration` has to be provided manually.\n\n Parameters\n ----------\n signals : Sequence[EdfSignal]\n The (non-annotation) signals to be contained in the EDF file.\n patient : Patient | None, default: None\n The \"local patient identification\", containing patient code, sex, birthdate,\n name, and optional additional fields. If `None`, the field is set to `X X X X`\n in accordance with EDF+ specs.\n recording : Recording | None, default: None\n The \"local recording identification\", containing recording startdate, hospital\n administration code, investigator/technical code, equipment code, and optional\n additional fields. If `None`, the field is set to `Startdate X X X X` in\n accordance with EDF+ specs.\n starttime : datetime.time | None, default: None\n The starttime of the recording. If `None`, `00.00.00` is used. If `starttime`\n contains microseconds, an EDF+C file is created.\n data_record_duration : float | None, default: None\n The duration of each data record in seconds. If `None`, an appropriate value is\n chosen automatically.\n annotations : Iterable[EdfAnnotation] | None, default: None\n The annotations, consisting of onset, duration (optional), and text. 
If not\n `None`, an EDF+C file is created.\n \"\"\"\n\n version = RawHeaderFieldInt(8)\n \"\"\"EDF version, always `0`\"\"\"\n local_patient_identification = RawHeaderFieldStr(80, is_settable=True)\n \"\"\"\n Unparsed string representation of the legacy local patient identification.\n\n See also\n --------\n patient: Parsed representation, as a :class:`Patient` object.\n \"\"\"\n local_recording_identification = RawHeaderFieldStr(80, is_settable=True)\n \"\"\"\n Unparsed string representation of the legacy local recording identification.\n\n See also\n --------\n recording: Parsed representation, as a :class:`Recording` object.\n \"\"\"\n _startdate = RawHeaderFieldDate(8, is_settable=True)\n _starttime = RawHeaderFieldTime(8, is_settable=True)\n bytes_in_header_record = RawHeaderFieldInt(8)\n \"\"\"Number of bytes in the header record.\"\"\"\n reserved = RawHeaderFieldStr(44)\n \"\"\"`\"EDF+C\"` for an EDF+C file, else `\"\"`.\"\"\"\n num_data_records = RawHeaderFieldInt(8)\n \"\"\"Number of data records in the recording.\"\"\"\n _data_record_duration = RawHeaderFieldFloat(8, is_settable=True)\n _num_signals = RawHeaderFieldInt(4, is_settable=True)\n\n def __init__(\n self,\n signals: Sequence[EdfSignal],\n *,\n patient: Patient | None = None,\n recording: Recording | None = None,\n starttime: datetime.time | None = None,\n data_record_duration: float | None = None,\n annotations: Iterable[EdfAnnotation] | None = None,\n ):\n if not signals and not annotations:\n raise ValueError(\"Edf must contain either signals or annotations\")\n if patient is None:\n patient = Patient()\n if recording is None:\n recording = Recording()\n if starttime is None:\n starttime = datetime.time(0, 0, 0)\n if data_record_duration is None:\n data_record_duration = _calculate_data_record_duration(signals)\n elif len(signals) == 0 and data_record_duration != 0:\n raise ValueError(\n \"Data record duration must be zero for annotation-only files\"\n )\n\n self._data_record_duration = data_record_duration\n self._set_num_data_records_with_signals(signals)\n self._version = Edf.version.encode(0)\n self.local_patient_identification = patient._to_str()\n self.local_recording_identification = recording._to_str()\n self._set_startdate_with_recording(recording)\n self._starttime = starttime.replace(microsecond=0)\n self._reserved = Edf.reserved.encode(\"\")\n if starttime.microsecond and annotations is None:\n warnings.warn(\"Creating EDF+C to store microsecond starttime.\")\n if annotations is not None or starttime.microsecond:\n signals = (\n *signals,\n _create_annotations_signal(\n annotations if annotations is not None else (),\n num_data_records=self.num_data_records,\n data_record_duration=self.data_record_duration,\n subsecond_offset=starttime.microsecond / 1_000_000,\n ),\n )\n self._reserved = Edf.reserved.encode(\"EDF+C\")\n self._set_signals(signals)\n\n def __repr__(self) -> str:\n signals_text = f\"{len(self.signals)} signal\"\n if len(self.signals) != 1:\n signals_text += \"s\"\n annotations_text = f\"{len(self.annotations)} annotation\"\n if len(self.annotations) != 1:\n annotations_text += \"s\"\n return f\"<Edf {signals_text} {annotations_text}>\"\n\n def _load_data(self, file: Path | io.BufferedReader | io.BytesIO) -> None:\n lens = [signal.samples_per_data_record for signal in self._signals]\n datarecord_len = sum(lens)\n if not isinstance(file, Path):\n datarecords = np.frombuffer(file.read(), dtype=np.int16)\n else:\n datarecords = np.memmap(\n file,\n dtype=np.int16,\n mode=\"r\",\n 
offset=self.bytes_in_header_record,\n )\n datarecords.shape = (self.num_data_records, datarecord_len)\n ends = np.cumsum(lens)\n starts = ends - lens\n\n for signal, start, end in zip(self._signals, starts, ends):\n signal._digital = datarecords[:, start:end].flatten()\n\n def _read_header(self, buffer: io.BufferedReader | io.BytesIO) -> None:\n for header_name, length in get_header_fields(Edf):\n setattr(self, \"_\" + header_name, buffer.read(length))\n self._signals = self._parse_signal_headers(buffer.read(256 * self._num_signals))\n\n @property\n def signals(self) -> tuple[EdfSignal, ...]:\n \"\"\"\n Ordinary signals contained in the recording.\n\n Annotation signals are excluded. Individual signals can not be removed, added,\n or replaced by modifying this property. Use :meth:`Edf.append_signals`,\n :meth:`Edf.drop_signals`, or :attr:`EdfSignal.data`, respectively.\n \"\"\"\n return tuple(s for s in self._signals if s.label != \"EDF Annotations\")\n\n def _set_signals(self, signals: Sequence[EdfSignal]) -> None:\n signals = tuple(signals)\n self._set_num_data_records_with_signals(signals)\n self._signals = signals\n self._bytes_in_header_record = Edf.bytes_in_header_record.encode(\n 256 * (len(signals) + 1)\n )\n self._num_signals = len(signals)\n if all(s.label == \"EDF Annotations\" for s in signals):\n self._data_record_duration = 0\n\n def _set_num_data_records_with_signals(\n self,\n signals: Sequence[EdfSignal],\n ) -> None:\n if not signals:\n num_data_records = 1\n else:\n signal_durations = [\n round(len(s._digital) / s.sampling_frequency, 12) for s in signals\n ]\n if any(v != signal_durations[0] for v in signal_durations[1:]):\n raise ValueError(\n f\"Inconsistent signal durations (in seconds): {signal_durations}\"\n )\n num_data_records = _calculate_num_data_records(\n signal_durations[0],\n self.data_record_duration,\n )\n signal_lengths = [len(s._digital) for s in signals]\n if any(l % num_data_records for l in signal_lengths):\n raise ValueError(\n f\"Not all signal lengths can be split into {num_data_records} data records: {signal_lengths}\"\n )\n self._num_data_records = Edf.num_data_records.encode(num_data_records)\n\n def _parse_signal_headers(self, raw_signal_headers: bytes) -> tuple[EdfSignal, ...]:\n raw_headers_split: dict[str, list[bytes]] = {}\n start = 0\n for header_name, length in get_header_fields(EdfSignal):\n end = start + length * self._num_signals\n raw_header = raw_signal_headers[start:end]\n raw_headers_split[header_name] = [\n raw_header[i : length + i] for i in range(0, len(raw_header), length)\n ]\n start = end\n signals = []\n for i in range(self._num_signals):\n raw_signal_header = {\n key: raw_headers_split[key][i] for key in raw_headers_split\n }\n try:\n sampling_frequency = (\n int(raw_signal_header[\"samples_per_data_record\"])\n / self.data_record_duration\n )\n except ZeroDivisionError:\n if raw_signal_header[\"_label\"].rstrip() == b\"EDF Annotations\":\n sampling_frequency = 0\n signals.append(\n EdfSignal._from_raw_header(sampling_frequency, **raw_signal_header)\n )\n return tuple(signals)\n\n def write(self, target: Path | str | io.BufferedWriter | io.BytesIO) -> None:\n \"\"\"\n Write an Edf to a file or file-like object.\n\n Parameters\n ----------\n target : Path | str | io.BufferedWriter | io.BytesIO\n The file location (path object or string) or file-like object to write to.\n \"\"\"\n if self.num_data_records == -1:\n warnings.warn(\"num_data_records=-1, determining correct value from data\")\n num_data_records = 
_calculate_num_data_records(\n len(self._signals[0]._digital) * self._signals[0].sampling_frequency,\n self.data_record_duration,\n )\n else:\n num_data_records = self.num_data_records\n for signal in self._signals:\n signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode( # type: ignore[attr-defined]\n len(signal._digital) // num_data_records\n )\n header_records = []\n for header_name, _ in get_header_fields(Edf):\n header_records.append(getattr(self, \"_\" + header_name))\n for header_name, _ in get_header_fields(EdfSignal):\n for signal in self._signals:\n header_records.append(getattr(signal, \"_\" + header_name))\n header_record = b\"\".join(header_records)\n\n lens = [signal.samples_per_data_record for signal in self._signals]\n ends = np.cumsum(lens)\n starts = ends - lens\n data_record = np.empty((num_data_records, sum(lens)), dtype=np.int16)\n for signal, start, end in zip(self._signals, starts, ends):\n data_record[:, start:end] = signal._digital.reshape((-1, end - start))\n\n if isinstance(target, str):\n target = Path(target)\n if isinstance(target, io.BufferedWriter):\n target.write(header_record)\n data_record.tofile(target)\n elif isinstance(target, io.BytesIO):\n target.write(header_record)\n target.write(data_record.tobytes())\n else:\n with target.expanduser().open(\"wb\") as file:\n file.write(header_record)\n data_record.tofile(file)\n\n @property\n def labels(self) -> tuple[str, ...]:\n \"\"\"\n The labels of all signals contained in the Edf.\n\n Returns\n -------\n tuple[str, ...]\n The labels, in order of the signals.\n \"\"\"\n return tuple(s.label for s in self.signals)\n\n def get_signal(self, label: str) -> EdfSignal:\n \"\"\"\n Retrieve a single signal by its label.\n\n The label has to be unique - a ValueError is raised if it is ambiguous or does\n not exist.\n\n Parameters\n ----------\n label : str\n A label identifying a single signal\n\n Returns\n -------\n EdfSignal\n The signal corresponding to the given label.\n \"\"\"\n count = self.labels.count(label)\n if count == 0:\n raise ValueError(\n f\"No signal with label {label!r}, possible options: {self.labels}\"\n )\n if count > 1:\n indices = [i for i, l in enumerate(self.labels) if l == label]\n raise ValueError(f\"Ambiguous label {label!r} identifies indices {indices}\")\n return self.signals[self.labels.index(label)]\n\n @property\n def patient(self) -> Patient:\n \"\"\"\n Parsed object representation of the local patient identification.\n\n See :class:`Patient` for information on its attributes.\n \"\"\"\n return Patient._from_str(self.local_patient_identification)\n\n @patient.setter\n def patient(self, patient: Patient) -> None:\n self.local_patient_identification = patient._to_str()\n\n @property\n def recording(self) -> Recording:\n \"\"\"\n Parsed object representation of the local recording identification.\n\n See :class:`Recording` for information on its attributes.\n \"\"\"\n return Recording._from_str(self.local_recording_identification)\n\n @recording.setter\n def recording(self, recording: Recording) -> None:\n self._set_startdate_with_recording(recording)\n self.local_recording_identification = recording._to_str()\n\n @property\n def startdate(self) -> datetime.date:\n \"\"\"\n Recording startdate.\n\n If the :attr:`local_recording_identification` conforms to the EDF+ standard, the\n startdate provided there is used. If not, this falls back to the legacy\n :attr:`startdate` field. If both differ, a warning is issued and the EDF+ field\n is preferred. 
Raises an `AnonymizedDateError` if the EDF+ field is anonymized\n (i.e., begins with `Startdate X`).\n \"\"\"\n with contextlib.suppress(Exception):\n if self._startdate != self.recording.startdate:\n warnings.warn(\n f\"Different values in startdate fields: {self._startdate}, {self.recording.startdate}\"\n )\n try:\n return self.recording.startdate\n except AnonymizedDateError:\n raise\n except ValueError:\n return self._startdate\n\n @startdate.setter\n def startdate(self, startdate: datetime.date) -> None:\n self._startdate = startdate\n try:\n self.recording.startdate # noqa: B018\n except AnonymizedDateError:\n pass\n except Exception:\n return\n recording_subfields = self.local_recording_identification.split()\n recording_subfields[1] = encode_edfplus_date(startdate)\n self.local_recording_identification = \" \".join(recording_subfields)\n\n @property\n def _subsecond_offset(self) -> float:\n try:\n timekeeping_raw = self._timekeeping_signal._digital.tobytes()\n first_data_record = timekeeping_raw[: timekeeping_raw.find(b\"\\x00\") + 1]\n return _EdfAnnotationsDataRecord.from_bytes(first_data_record).tals[0].onset\n except StopIteration:\n return 0\n\n @property\n def starttime(self) -> datetime.time:\n \"\"\"\n Recording starttime.\n\n In EDF+ files, microsecond accuracy is supported.\n \"\"\"\n subsecond_offset = self._subsecond_offset\n try:\n return self._starttime.replace(\n microsecond=round(subsecond_offset * 1000000)\n )\n except ValueError as e:\n raise ValueError(\n f\"Subsecond offset in first annotation must be 0.X, is {subsecond_offset}\"\n ) from e\n\n @starttime.setter\n def starttime(self, starttime: datetime.time) -> None:\n onset_change = starttime.microsecond / 1000000 - self._subsecond_offset\n self._starttime = starttime.replace(microsecond=0)\n if starttime.microsecond != self.starttime.microsecond:\n timekeeping_signal = self._timekeeping_signal\n data_records = []\n for data_record in timekeeping_signal._digital.reshape(\n (-1, timekeeping_signal.samples_per_data_record)\n ):\n annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())\n for tal in annot_dr.tals:\n tal.onset = round(tal.onset + onset_change, 12)\n data_records.append(annot_dr.to_bytes())\n maxlen = max(len(data_record) for data_record in data_records)\n if maxlen % 2:\n maxlen += 1\n raw = b\"\".join(dr.ljust(maxlen, b\"\\x00\") for dr in data_records)\n timekeeping_signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode( # type: ignore[attr-defined]\n maxlen // 2\n )\n timekeeping_signal._sampling_frequency = (\n maxlen // 2 * self.data_record_duration\n )\n timekeeping_signal._digital = np.frombuffer(raw, dtype=np.int16)\n\n def _set_startdate_with_recording(self, recording: Recording) -> None:\n try:\n self._startdate = recording.startdate\n except AnonymizedDateError:\n self._startdate = datetime.date(1985, 1, 1)\n\n @property\n def data_record_duration(self) -> float:\n \"\"\"Duration of each data record in seconds.\"\"\"\n return self._data_record_duration\n\n def update_data_record_duration(\n self,\n data_record_duration: float,\n method: Literal[\"strict\", \"pad\", \"truncate\"] = \"strict\",\n ) -> None:\n \"\"\"\n Update the data record duration.\n\n This operation will fail if the new duration is incompatible with the current\n sampling frequencies.\n\n Parameters\n ----------\n data_record_duration : float\n The new data record duration in seconds.\n method : `{\"strict\", \"pad\", \"truncate\"}`, default: `\"strict\"`\n How to handle the case where 
the new duration does not divide the Edf\n duration evenly\n\n - \"strict\": Raise a ValueError\n - \"pad\": Pad the data with zeros to the next compatible duration. If zero\n is outside the physical range, data is padded with the physical minimum.\n - \"truncate\": Truncate the data to the previous compatible duration (might\n lead to loss of data)\n \"\"\"\n if data_record_duration == self.data_record_duration:\n return\n if data_record_duration <= 0:\n raise ValueError(\n f\"Data record duration must be positive, got {data_record_duration}\"\n )\n if not self.signals:\n raise ValueError(\n \"Data record duration must be zero for annotation-only files\"\n )\n for signal in self.signals:\n spr = signal.sampling_frequency * data_record_duration\n if spr % 1:\n raise ValueError(\n f\"Cannot set data record duration to {data_record_duration}: Incompatible sampling frequency {signal.sampling_frequency} Hz\"\n )\n\n num_data_records = self._pad_or_truncate_signals(data_record_duration, method)\n self._update_record_duration_in_annotation_signals(\n data_record_duration, num_data_records\n )\n self._data_record_duration = data_record_duration\n self._num_data_records = Edf.num_data_records.encode(num_data_records)\n\n @property\n def num_signals(self) -> int:\n \"\"\"Return the number of signals, excluding annotation signals for EDF+.\"\"\"\n return len(self.signals)\n\n def _pad_or_truncate_signals(\n self, data_record_duration: float, method: Literal[\"strict\", \"pad\", \"truncate\"]\n ) -> int:\n if method == \"pad\":\n new_duration = (\n ceil(self.duration / data_record_duration) * data_record_duration\n )\n self._pad_or_truncate_data(new_duration)\n return round(new_duration / data_record_duration)\n if method == \"truncate\":\n new_duration = (\n floor(self.duration / data_record_duration) * data_record_duration\n )\n self._pad_or_truncate_data(new_duration)\n return round(new_duration / data_record_duration)\n return _calculate_num_data_records(self.duration, data_record_duration)\n\n def _update_record_duration_in_annotation_signals(\n self, data_record_duration: float, num_data_records: int\n ) -> None:\n signals = list(self._signals)\n for idx, signal in enumerate(self._signals):\n if signal not in self._annotation_signals:\n continue\n annotations = []\n for data_record in signal._digital.reshape(\n (-1, signal.samples_per_data_record)\n ):\n annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())\n if signal is self._timekeeping_signal:\n annotations.extend(annot_dr.annotations[1:])\n else:\n annotations.extend(annot_dr.annotations)\n signals[idx] = _create_annotations_signal(\n [\n EdfAnnotation(a.onset - self._subsecond_offset, a.duration, a.text)\n for a in annotations\n ],\n num_data_records=num_data_records,\n data_record_duration=data_record_duration,\n with_timestamps=signal is self._timekeeping_signal,\n subsecond_offset=self._subsecond_offset,\n )\n self._signals = tuple(signals)\n\n def _pad_or_truncate_data(self, new_duration: float) -> None:\n for signal in self.signals:\n n_samples = round(new_duration * signal.sampling_frequency)\n diff = n_samples - len(signal._digital)\n if diff > 0:\n physical_pad_value = 0.0\n if signal.physical_min > 0 or signal.physical_max < 0:\n physical_pad_value = signal.physical_min\n signal._set_data(\n np.pad(signal.data, (0, diff), constant_values=physical_pad_value)\n )\n elif diff < 0:\n signal._set_data(signal.data[:diff])\n\n def anonymize(self) -> None:\n \"\"\"\n Anonymize a recording.\n\n Header fields are modified 
as follows:\n - local patient identification is set to `X X X X`\n - local recording identification is set to `Startdate X X X X`\n - startdate is set to `01.01.85`\n - starttime is set to `00.00.00`\n\n For EDF+ files, subsecond starttimes specified via an annotations signal are\n removed.\n \"\"\"\n self.patient = Patient()\n self.recording = Recording()\n self.starttime = datetime.time(0, 0, 0)\n\n def drop_signals(self, drop: Iterable[int | str]) -> None:\n \"\"\"\n Drop signals by index or label.\n\n Signal indices (int) and labels (str) can be provided in the same iterable. For\n ambiguous labels, all corresponding signals are dropped. Raises a ValueError if\n at least one of the provided identifiers does not correspond to a signal.\n\n Parameters\n ----------\n drop : Iterable[int | str]\n The signals to drop, identified by index or label.\n \"\"\"\n if isinstance(drop, str):\n drop = [drop]\n selected: list[EdfSignal] = []\n dropped: list[int | str] = []\n i = 0\n for signal in self._signals:\n if signal.label == \"EDF Annotations\":\n selected.append(signal)\n continue\n if i in drop or signal.label in drop:\n dropped.append(i)\n dropped.append(signal.label)\n else:\n selected.append(signal)\n i += 1\n if not_dropped := set(drop) - set(dropped):\n raise ValueError(f\"No signal found with index/label {not_dropped}\")\n self._signals = tuple(selected)\n self._bytes_in_header_record = Edf.bytes_in_header_record.encode(\n 256 * (len(selected) + 1)\n )\n self._num_signals = len(selected)\n\n def append_signals(self, new_signals: EdfSignal | Iterable[EdfSignal]) -> None:\n \"\"\"\n Append one or more signal(s) to the Edf recording.\n\n Every signal must be compatible with the current `data_record_duration` and all\n signal durations must match the overall recording duration. For recordings\n containing EDF+ annotation signals, the new signals are inserted after the last\n ordinary (i.e. 
non-annotation) signal.\n\n Parameters\n ----------\n new_signals : EdfSignal | Iterable[EdfSignal]\n The signal(s) to add.\n \"\"\"\n if isinstance(new_signals, EdfSignal):\n new_signals = [new_signals]\n last_ordinary_index = 0\n for i, signal in enumerate(self._signals):\n if signal.label != \"EDF Annotations\":\n last_ordinary_index = i\n self._set_signals(\n [\n *self._signals[: last_ordinary_index + 1],\n *new_signals,\n *self._signals[last_ordinary_index + 1 :],\n ]\n )\n\n @property\n def _annotation_signals(self) -> Iterable[EdfSignal]:\n return (signal for signal in self._signals if signal.label == \"EDF Annotations\")\n\n @property\n def _timekeeping_signal(self) -> EdfSignal:\n return next(iter(self._annotation_signals))\n\n @property\n def duration(self) -> float:\n \"\"\"Recording duration in seconds.\"\"\"\n return self.num_data_records * self.data_record_duration\n\n @property\n def annotations(self) -> tuple[EdfAnnotation, ...]:\n \"\"\"\n All annotations contained in the Edf, sorted chronologically.\n\n Does not include timekeeping annotations.\n \"\"\"\n annotations: list[EdfAnnotation] = []\n for i, signal in enumerate(self._annotation_signals):\n for data_record in signal._digital.reshape(\n (-1, signal.samples_per_data_record)\n ):\n annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())\n if i == 0:\n # from https://www.edfplus.info/specs/edfplus.html#timekeeping:\n # The first annotation of the first 'EDF Annotations' signal in each\n # data record is empty, but its timestamp specifies how many seconds\n # after the file startdate/time that data record starts.\n annotations.extend(annot_dr.annotations[1:])\n else:\n annotations.extend(annot_dr.annotations)\n subsecond_offset = self._subsecond_offset\n annotations = [\n EdfAnnotation(\n round(ann.onset - subsecond_offset, 12), ann.duration, ann.text\n )\n for ann in annotations\n ]\n return tuple(sorted(annotations))\n\n def drop_annotations(self, text: str) -> None:\n \"\"\"\n Drop annotations with a given text.\n\n Parameters\n ----------\n text : str\n All annotations whose text exactly matches this parameter are removed.\n \"\"\"\n for signal in self._annotation_signals:\n for data_record in signal._digital.reshape(\n (-1, signal.samples_per_data_record)\n ):\n annotations = _EdfAnnotationsDataRecord.from_bytes(\n data_record.tobytes()\n )\n annotations.drop_annotations_with_text(text)\n data_record[:] = np.frombuffer(\n annotations.to_bytes().ljust(len(data_record) * 2, b\"\\x00\"),\n dtype=np.int16,\n )\n\n def to_bytes(self) -> bytes:\n \"\"\"\n Convert an Edf to a `bytes` object.\n\n Returns\n -------\n bytes\n The binary representation of the Edf object (i.e., what a file created with\n `Edf.write` would contain).\n \"\"\"\n stream = io.BytesIO()\n self.write(stream)\n stream.seek(0)\n return stream.read()\n\n def slice_between_seconds(\n self,\n start: float,\n stop: float,\n *,\n keep_all_annotations: bool = False,\n ) -> None:\n \"\"\"\n Slice to the interval between two times.\n\n The sample point corresponding to `stop` is excluded. 
`start` and `stop` are\n given in seconds from recording start and have to correspond exactly to a sample\n time in all non-annotation signals.\n\n Parameters\n ----------\n start : float\n Start time in seconds from recording start.\n stop : float\n Stop time in seconds from recording start.\n keep_all_annotations : bool, default: False\n If set to `True`, annotations outside the selected time interval are kept.\n \"\"\"\n signals: list[EdfSignal] = []\n self._verify_seconds_inside_recording_time(start)\n self._verify_seconds_inside_recording_time(stop)\n self._verify_seconds_coincide_with_sample_time(start)\n self._verify_seconds_coincide_with_sample_time(stop)\n self._num_data_records = Edf.num_data_records.encode(\n int((stop - start) / self.data_record_duration)\n )\n for signal in self._signals:\n if signal.label == \"EDF Annotations\":\n signals.append(\n self._slice_annotations_signal(\n signal,\n start=start,\n stop=stop,\n keep_all_annotations=keep_all_annotations,\n )\n )\n else:\n start_index = start * signal.sampling_frequency\n stop_index = stop * signal.sampling_frequency\n signal._digital = signal._digital[int(start_index) : int(stop_index)]\n signals.append(signal)\n self._set_signals(signals)\n self._shift_startdatetime(int(start))\n\n def slice_between_annotations(\n self,\n start_text: str,\n stop_text: str,\n *,\n keep_all_annotations: bool = False,\n ) -> None:\n \"\"\"\n Slice to the interval between two EDF+ annotations.\n\n The sample point corresponding to the onset of the annotation identified by\n `stop_text` is excluded. `start_text` and `stop_text` each have to uniquely\n identify a single annotation, whose onset corresponds exactly to a sample time\n in all non-annotation signals.\n\n Parameters\n ----------\n start_text : str\n Text identifying the start annotation.\n stop_text : str\n Text identifying the stop annotation.\n keep_all_annotations : bool, default: False\n If set to `True`, annotations outside the selected time interval are kept.\n \"\"\"\n self.slice_between_seconds(\n self._get_annotation_by_text(start_text).onset,\n self._get_annotation_by_text(stop_text).onset,\n keep_all_annotations=keep_all_annotations,\n )\n\n def _get_annotation_by_text(self, text: str) -> EdfAnnotation:\n matches = []\n for annotation in self.annotations:\n if annotation.text == text:\n matches.append(annotation)\n if len(matches) == 1:\n return matches[0]\n if len(matches) > 1:\n raise ValueError(\n f\"Ambiguous annotation text {text!r}, found {len(matches)} matches\"\n )\n raise ValueError(f\"No annotation found with text {text!r}\")\n\n def _verify_seconds_inside_recording_time(self, seconds: float) -> None:\n if not 0 <= seconds <= self.duration:\n raise ValueError(\n f\"{seconds} is an invalid slice time for recording duration {self.duration}\"\n )\n\n def _verify_seconds_coincide_with_sample_time(self, seconds: float) -> None:\n for i, signal in enumerate(self.signals):\n index = seconds * signal.sampling_frequency\n if index != int(index):\n raise ValueError(\n f\"{seconds}s is not a sample time of signal {i} ({signal.label}) with fs={signal.sampling_frequency}Hz\"\n )\n\n def _shift_startdatetime(self, seconds: float) -> None:\n timedelta = datetime.timedelta(seconds=seconds)\n try:\n startdate = self.startdate\n startdate_anonymized = False\n except AnonymizedDateError:\n startdate = datetime.date.fromtimestamp(0)\n startdate_anonymized = True\n startdatetime = datetime.datetime.combine(startdate, self.starttime)\n startdatetime += timedelta\n if not 
startdate_anonymized:\n self.startdate = startdatetime.date()\n self.starttime = startdatetime.time()\n\n def copy(self) -> Edf:\n \"\"\"\n Create a deep copy of the Edf.\n\n Returns\n -------\n Edf\n The copied Edf object.\n \"\"\"\n return copy.deepcopy(self)\n\n def _slice_annotations_signal(\n self,\n signal: EdfSignal,\n *,\n start: float,\n stop: float,\n keep_all_annotations: bool,\n ) -> EdfSignal:\n is_timekeeping_signal = signal == self._timekeeping_signal\n annotations: list[EdfAnnotation] = []\n for data_record in signal._digital.reshape(\n (-1, signal.samples_per_data_record)\n ):\n annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())\n if is_timekeeping_signal:\n annotations.extend(annot_dr.annotations[1:])\n else:\n annotations.extend(annot_dr.annotations)\n annotations = [\n EdfAnnotation(round(a.onset - start, 12), a.duration, a.text)\n for a in annotations\n if keep_all_annotations or start <= a.onset < stop\n ]\n return _create_annotations_signal(\n annotations,\n num_data_records=self.num_data_records,\n data_record_duration=self.data_record_duration,\n with_timestamps=is_timekeeping_signal,\n subsecond_offset=self._subsecond_offset + start - int(start),\n )" }, { "identifier": "EdfSignal", "path": "edfio/edf.py", "snippet": "class EdfSignal:\n \"\"\"A single EDF signal.\n\n Attributes that might break the signal or file on modification (i.e.,\n `sampling_frequency`, `physical_range`, `digital_range`, `samples_per_data_record`,\n and `reserved`) can not be set after instantiation.\n\n To reduce memory consumption, signal data is always stored as a 16-bit integer array\n containing the digital values that would be written to the corresponding EDF file.\n Therefore, it is expected that `EdfSignal.data` does not match the physical\n values passed during instantiation exactly.\n\n Parameters\n ----------\n data : npt.NDArray[np.float64]\n The signal data (physical values).\n sampling_frequency : float\n The sampling frequency in Hz.\n label : str, default: `\"\"`\n The signal's label, e.g., `\"EEG Fpz-Cz\"` or `\"Body temp\"`.\n transducer_type : str, default: `\"\"`\n The transducer type, e.g., `\"AgAgCl electrode\"`.\n physical_dimension : str, default: `\"\"`\n The physical dimension, e.g., `\"uV\"` or `\"degreeC\"`\n physical_range : tuple[float, float] | None, default: None\n The physical range given as a tuple of `(physical_min, physical_max)`. If\n `None`, this is determined from the data.\n digital_range : tuple[int, int], default: `(-32768, 32767)`\n The digital range given as a tuple of `(digital_min, digital_max)`. 
Uses the\n maximum resolution of 16-bit integers by default.\n prefiltering : str, default: `\"\"`\n The signal prefiltering, e.g., `\"HP:0.1Hz LP:75Hz\"`.\n \"\"\"\n\n _label = RawHeaderFieldStr(16, is_settable=True)\n transducer_type = RawHeaderFieldStr(80, is_settable=True)\n \"\"\"Transducer type, e.g., `\"AgAgCl electrode\"`.\"\"\"\n physical_dimension = RawHeaderFieldStr(8, is_settable=True)\n \"\"\"Physical dimension, e.g., `\"uV\"` or `\"degreeC\"`.\"\"\"\n physical_min = RawHeaderFieldFloat(8)\n \"\"\"Physical minimum, e.g., `-500` or `34`.\"\"\"\n physical_max = RawHeaderFieldFloat(8)\n \"\"\"Physical maximum, e.g., `500` or `40`.\"\"\"\n digital_min = RawHeaderFieldInt(8)\n \"\"\"Digital minimum, e.g., `-2048`.\"\"\"\n digital_max = RawHeaderFieldInt(8)\n \"\"\"Digital maximum, e.g., `2047`.\"\"\"\n prefiltering = RawHeaderFieldStr(80, is_settable=True)\n \"\"\"Signal prefiltering, e.g., `\"HP:0.1Hz LP:75Hz\"`.\"\"\"\n samples_per_data_record = RawHeaderFieldInt(8)\n \"\"\"\n Number of samples in each data record.\n\n For newly instantiated :class:`EdfSignal` objects, this is only set once\n :meth:`Edf.write` is called.\n \"\"\"\n reserved = RawHeaderFieldStr(32)\n \"\"\"Reserved signal header field, always `\"\"`\"\"\"\n\n def __init__(\n self,\n data: npt.NDArray[np.float64],\n sampling_frequency: float,\n *,\n label: str = \"\",\n transducer_type: str = \"\",\n physical_dimension: str = \"\",\n physical_range: tuple[float, float] | None = None,\n digital_range: tuple[int, int] = (-32768, 32767),\n prefiltering: str = \"\",\n ):\n self._sampling_frequency = sampling_frequency\n self.label = label\n self.transducer_type = transducer_type\n self.physical_dimension = physical_dimension\n self.prefiltering = prefiltering\n self._reserved = EdfSignal.reserved.encode(\"\")\n if not np.all(np.isfinite(data)):\n raise ValueError(\"Signal data must contain only finite values\")\n self._set_physical_range(physical_range, data)\n self._set_digital_range(digital_range)\n self._set_data(data)\n\n def __repr__(self) -> str:\n info = f\"{self.sampling_frequency:g}Hz\"\n if self.label:\n info = f\"{self.label} \" + info\n return f\"<EdfSignal {info}>\"\n\n @classmethod\n def _from_raw_header(\n cls,\n sampling_frequency: float,\n *,\n _label: bytes,\n transducer_type: bytes,\n physical_dimension: bytes,\n physical_min: bytes,\n physical_max: bytes,\n digital_min: bytes,\n digital_max: bytes,\n prefiltering: bytes,\n samples_per_data_record: bytes,\n reserved: bytes,\n ) -> EdfSignal:\n sig = object.__new__(cls)\n sig._sampling_frequency = sampling_frequency\n sig._label = EdfSignal._label.decode(_label) # type: ignore[attr-defined]\n sig._transducer_type = transducer_type # type: ignore[attr-defined]\n sig._physical_dimension = physical_dimension # type: ignore[attr-defined]\n sig._physical_min = physical_min # type: ignore[attr-defined]\n sig._physical_max = physical_max # type: ignore[attr-defined]\n sig._digital_min = digital_min # type: ignore[attr-defined]\n sig._digital_max = digital_max # type: ignore[attr-defined]\n sig._prefiltering = prefiltering # type: ignore[attr-defined]\n sig._samples_per_data_record = samples_per_data_record # type: ignore[attr-defined]\n sig._reserved = reserved # type: ignore[attr-defined]\n return sig\n\n @classmethod\n def from_hypnogram(\n cls,\n stages: npt.NDArray[np.float64],\n stage_duration: float = 30,\n *,\n label: str = \"\",\n ) -> EdfSignal:\n \"\"\"Create an EDF signal from a hypnogram, with scaling according to EDF specs.\n\n According to the 
EDF FAQ [1]_, use integer numbers 0, 1, 2, 3, 4, 5, 6, and 9\n for sleep stages W, 1, 2, 3, 4, R, MT, und unscored, respectively. The digital\n range is set to `(0, 9)`.\n\n Parameters\n ----------\n stages : npt.NDArray[np.float64]\n The sleep stages, coded as integer numbers.\n stage_duration : float, default: `30`\n The duration of each sleep stage in seconds, used to set the sampling\n frequency to its inverse.\n label : str, default: `\"\"`\n The signal's label.\n\n Returns\n -------\n EdfSignal\n The resulting :class:`EdfSignal` object.\n\n References\n ----------\n .. [1] EDF FAQ, https://www.edfplus.info/specs/edffaq.html\n \"\"\"\n allowed_stages = {0, 1, 2, 3, 4, 5, 6, 9}\n if invalid_stages := set(stages) - allowed_stages:\n raise ValueError(f\"stages contains invalid values: {invalid_stages}\")\n return EdfSignal(\n data=stages,\n sampling_frequency=1 / stage_duration,\n label=label,\n physical_range=(0, 9),\n digital_range=(0, 9),\n )\n\n @property\n def label(self) -> str:\n \"\"\"Signal label, e.g., `\"EEG Fpz-Cz\"` or `\"Body temp\"`.\"\"\"\n return self._label\n\n @label.setter\n def label(self, label: str) -> None:\n if label == \"EDF Annotations\":\n raise ValueError(\"Ordinary signal label must not be 'EDF Annotations'.\")\n self._label = label\n\n @property\n def physical_range(self) -> FloatRange:\n \"\"\"The physical range as a tuple of `(physical_min, physical_max)`.\"\"\"\n return FloatRange(self.physical_min, self.physical_max)\n\n @property\n def digital_range(self) -> IntRange:\n \"\"\"The digital range as a tuple of `(digital_min, digital_max)`.\"\"\"\n return IntRange(self.digital_min, self.digital_max)\n\n @property\n def sampling_frequency(self) -> float:\n \"\"\"The sampling frequency in Hz.\"\"\"\n return self._sampling_frequency\n\n @property\n def data(self) -> npt.NDArray[np.float64]:\n \"\"\"\n Numpy array containing the physical signal values as floats.\n\n To simplify avoiding inconsistencies between signal data and header fields,\n individual values in the returned array can not be modified. Use\n :meth:`EdfSignal.update_data` to overwrite with new physical data.\n \"\"\"\n try:\n gain, offset = calculate_gain_and_offset(\n self.digital_min,\n self.digital_max,\n self.physical_min,\n self.physical_max,\n )\n except ZeroDivisionError:\n data = self._digital.astype(np.float64)\n warnings.warn(\n f\"Digital minimum equals digital maximum ({self.digital_min}) for {self.label}, returning uncalibrated signal.\"\n )\n except ValueError:\n data = self._digital.astype(np.float64)\n else:\n data = (self._digital + offset) * gain\n data.setflags(write=False)\n return data\n\n def update_data(\n self,\n data: npt.NDArray[np.float64],\n *,\n keep_physical_range: bool = False,\n sampling_frequency: float | None = None,\n ) -> None:\n \"\"\"\n Overwrite physical signal values with an array of equal length.\n\n Parameters\n ----------\n data : npt.NDArray[np.float64]\n The new physical data.\n keep_physical_range : bool, default: False\n If `True`, the `physical_range` is not modified to accomodate the new data.\n sampling_frequency : float | None, default: None\n If not `None`, the `sampling_frequency` is updated to the new value. 
The new\n data must match the expected length for the new sampling frequency.\n \"\"\"\n expected_length = len(self._digital)\n if (\n sampling_frequency is not None\n and sampling_frequency != self._sampling_frequency\n ):\n expected_length = self._get_expected_new_length(sampling_frequency)\n if len(data) != expected_length:\n raise ValueError(\n f\"Signal lengths must match: got {len(data)}, expected {len(self._digital)}.\"\n )\n physical_range = self.physical_range if keep_physical_range else None\n self._set_physical_range(physical_range, data)\n if sampling_frequency is not None:\n self._sampling_frequency = sampling_frequency\n self._set_data(data)\n\n def _get_expected_new_length(self, sampling_frequency: float) -> int:\n if sampling_frequency <= 0:\n raise ValueError(\n f\"Sampling frequency must be positive, got {sampling_frequency}\"\n )\n current_length = len(self._digital)\n expected_length_f = (\n sampling_frequency / self._sampling_frequency * current_length\n )\n if not math.isclose(expected_length_f, round(expected_length_f), rel_tol=1e-10):\n raise ValueError(\n f\"Sampling frequency of {sampling_frequency} results in non-integer number of samples ({expected_length_f})\"\n )\n return round(expected_length_f)\n\n def _set_digital_range(self, digital_range: tuple[int, int]) -> None:\n digital_range = IntRange(*digital_range)\n if digital_range.min == digital_range.max:\n raise ValueError(\n f\"Digital minimum ({digital_range.min}) must differ from digital maximum ({digital_range.max}).\"\n )\n self._digital_min = EdfSignal.digital_min.encode(digital_range.min)\n self._digital_max = EdfSignal.digital_max.encode(digital_range.max)\n\n def _set_physical_range(\n self,\n physical_range: tuple[float, float] | None,\n data: npt.NDArray[np.float64],\n ) -> None:\n if physical_range is None:\n physical_range = FloatRange(data.min(), data.max())\n if physical_range.min == physical_range.max:\n physical_range = FloatRange(physical_range.min, physical_range.max + 1)\n else:\n physical_range = FloatRange(*physical_range)\n if physical_range.min == physical_range.max:\n raise ValueError(\n f\"Physical minimum ({physical_range.min}) must differ from physical maximum ({physical_range.max}).\"\n )\n data_min = data.min()\n data_max = data.max()\n if data_min < physical_range.min or data_max > physical_range.max:\n raise ValueError(\n f\"Signal range [{data_min}, {data_max}] out of physical range: [{physical_range.min}, {physical_range.max}]\"\n )\n self._physical_min = EdfSignal.physical_min.encode(\n round_float_to_8_characters(physical_range.min, math.floor)\n )\n self._physical_max = EdfSignal.physical_max.encode(\n round_float_to_8_characters(physical_range.max, math.ceil)\n )\n\n def _set_data(self, data: npt.NDArray[np.float64]) -> None:\n gain, offset = calculate_gain_and_offset(\n self.digital_min,\n self.digital_max,\n self.physical_min,\n self.physical_max,\n )\n self._digital = np.round(data / gain - offset).astype(np.int16)" }, { "identifier": "read_edf", "path": "edfio/edf.py", "snippet": "def read_edf(edf_file: Path | str | io.BufferedReader | io.BytesIO | bytes) -> Edf:\n \"\"\"\n Read an EDF file into an :class:`Edf` object.\n\n Parameters\n ----------\n edf_file : Path | str | io.BufferedReader | io.BytesIO\n The file location (path object or string) or file-like object to read from.\n\n Returns\n -------\n Edf\n The resulting :class:`Edf` object.\n \"\"\"\n return _read_edf(edf_file)" }, { "identifier": "RawHeaderFieldDate", "path": "edfio/_header_field.py", "snippet": 
"class RawHeaderFieldDate(RawHeaderField[datetime.date]):\n def __init__(self, length: int, *, is_settable: bool = False) -> None:\n super().__init__(length, is_settable=is_settable)\n\n def decode(self, field: bytes) -> datetime.date:\n date = decode_str(field)\n match = DATE_OR_TIME_PATTERN.fullmatch(date)\n if match is None:\n raise ValueError(f\"Invalid date for format DD.MM.YY: {date!r}\")\n day, month, year = (int(g) for g in match.groups())\n if year >= 85: # noqa: PLR2004\n year += 1900\n else:\n year += 2000\n return datetime.date(year, month, day)\n\n def encode(self, value: datetime.date) -> bytes:\n if not 1985 <= value.year <= 2084: # noqa: PLR2004\n raise ValueError(\"EDF only allows dates from 1985 to 2084\")\n return encode_str(value.strftime(\"%d.%m.%y\"), self.length)" }, { "identifier": "RawHeaderFieldFloat", "path": "edfio/_header_field.py", "snippet": "class RawHeaderFieldFloat(RawHeaderField[float]):\n def __init__(self, length: int, *, is_settable: bool = False) -> None:\n super().__init__(length, is_settable=is_settable)\n\n def decode(self, field: bytes) -> float:\n return decode_float(field)\n\n def encode(self, value: float) -> bytes:\n return encode_float(value, self.length)" }, { "identifier": "RawHeaderFieldTime", "path": "edfio/_header_field.py", "snippet": "class RawHeaderFieldTime(RawHeaderField[datetime.time]):\n def __init__(self, length: int, *, is_settable: bool = False) -> None:\n super().__init__(length, is_settable=is_settable)\n\n def decode(self, field: bytes) -> datetime.time:\n time = decode_str(field)\n match = DATE_OR_TIME_PATTERN.fullmatch(time)\n if match is None:\n raise ValueError(f\"Invalid time for format hh.mm.ss: {time!r}\")\n hours, minutes, seconds = (int(g) for g in match.groups())\n return datetime.time(hours, minutes, seconds)\n\n def encode(self, value: datetime.time) -> bytes:\n return encode_str(value.isoformat().replace(\":\", \".\"), self.length)" } ]
import datetime import numpy as np import pytest from pathlib import Path from edfio import Edf, EdfSignal, read_edf from edfio._header_field import ( RawHeaderFieldDate, RawHeaderFieldFloat, RawHeaderFieldTime, )
12,268
""" Tests to verify the adherence to the EDF FAQ: https://www.edfplus.info/specs/edffaq.html """ def test_q1_create_edf_signal_with_non_printable_character_in_label_fails(): with pytest.raises(ValueError, match="contains non-printable characters"):
""" Tests to verify the adherence to the EDF FAQ: https://www.edfplus.info/specs/edffaq.html """ def test_q1_create_edf_signal_with_non_printable_character_in_label_fails(): with pytest.raises(ValueError, match="contains non-printable characters"):
EdfSignal(np.arange(10.1), 1, label="\t")
1
2023-11-09 09:53:27+00:00
16k
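Illustrative aside (not part of the dataset row above): a minimal sketch of the EdfSignal API exercised by the edfio context snippets in this row. The stage values and the 30-second epoch duration are assumptions chosen for the example; only the constructor arguments, the read-only data property, and update_data that appear in the snippets are used.

import numpy as np
from edfio import EdfSignal

# Hypnogram-style signal: integer sleep stages sampled once per 30 s epoch.
stages = np.array([0, 1, 2, 3, 2, 5, 0], dtype=np.float64)
hypnogram = EdfSignal(
    stages,
    1 / 30,                          # sampling frequency is the inverse of the epoch duration
    label="Hypnogram",
    physical_range=(0, 9),           # EDF FAQ convention for sleep-stage signals
    digital_range=(0, 9),
)
print(hypnogram.data)                # physical values; the returned array is read-only
# Overwrite the samples with an array of equal length, keeping the physical range.
hypnogram.update_data(stages[::-1].copy(), keep_physical_range=True)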
sb-ai-lab/HypEx
hypex/matcher.py
[ { "identifier": "FaissMatcher", "path": "hypex/algorithms/faiss_matcher.py", "snippet": "class FaissMatcher:\n \"\"\"A class used to match instances using Faiss library.\"\"\"\n\n def __init__(\n self,\n df: pd.DataFrame,\n outcomes: str,\n treatment: str,\n info_col: list,\n features: [list, pd.DataFrame] = None,\n group_col: str = None,\n weights: dict = None,\n sigma: float = 1.96,\n validation: bool = None,\n n_neighbors: int = 10,\n silent: bool = True,\n pbar: bool = True,\n ):\n \"\"\"Construct all the necessary attributes.\n\n Args:\n df:\n The input dataframe\n outcomes:\n The target column name\n treatment:\n The column name with treatment\n info_col:\n A list with informational column names\n features:\n A list with names of feature using to matching. Defaults to None\n group_col:\n The column for stratification. Defaults to None\n weights:\n Dict with wight of features to matching. If you would like that matching will be more for\n 1 feature and less for another one\n sigma:\n The significant level for confidence interval calculation Defaults to 1.96\n validation:\n The flag for validation of estimated ATE with default method `random_feature`\n n_neighbors:\n The number of neighbors to find for each object. Defaults to 10\n silent:\n Write logs in debug mode\n pbar:\n Display progress bar while get index\n \"\"\"\n self.n_neighbors = n_neighbors\n if group_col is None:\n self.df = df\n else:\n self.df = df.sort_values([treatment, group_col])\n self.columns_del = [outcomes]\n if info_col:\n self.info_col = info_col\n else:\n self.info_col = []\n\n if self.info_col is not None:\n self.columns_del = self.columns_del + [x for x in self.info_col if x in self.df.columns]\n self.outcomes = outcomes if type(outcomes) == list else [outcomes]\n self.treatment = treatment\n\n if features is None:\n self.columns_match = list(\n set([x for x in list(self.df.columns) if x not in self.info_col] + [self.treatment] + self.outcomes)\n )\n else:\n try:\n self.columns_match = features[\"Feature\"].tolist() + [self.treatment] + self.outcomes\n except TypeError:\n self.columns_match = features + [self.treatment] + self.outcomes\n\n self.features_quality = (\n self.df.drop(columns=[self.treatment] + self.outcomes + self.info_col)\n .select_dtypes(include=[\"int16\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\"])\n .columns\n )\n self.dict_outcome_untreated = {}\n self.dict_outcome_treated = {}\n self.group_col = group_col\n self.weights = weights\n self.treated_index = None\n self.untreated_index = None\n self.orig_treated_index = None\n self.orig_untreated_index = None\n self.results = {}\n self.ATE = None\n self.sigma = sigma\n self.quality_dict = {}\n self.rep_dict = None\n self.validation = validation\n self.silent = silent\n self.pbar = pbar\n self.tqdm = None\n self.results = pd.DataFrame()\n\n def __getstate__(self) -> dict:\n \"\"\"Prepare the object for serialization.\n\n This method is called when the object is about to be serialized.\n It removes the `tqdm` attribute from the object's dictionary\n because `tqdm` objects cannot be serialized.\n\n Returns:\n A copy of the object's dictionary with the `tqdm` attribute removed.\n \"\"\"\n state = self.__dict__.copy()\n if \"tqdm\" in state:\n del state[\"tqdm\"]\n return state\n\n def __setstate__(self, state: dict):\n \"\"\"Restore the object after deserialization.\n\n This method is called when the object is deserialized.\n It adds the `tqdm` attribute back to the object's dictionary\n if the `pbar` attribute is True.\n\n 
Args:\n state:\n The deserialized state of the object\n \"\"\"\n if \"pbar\" in state and state[\"pbar\"]:\n state[\"tqdm\"] = None\n self.__dict__.update(state)\n\n def _get_split(self, df: pd.DataFrame) -> (pd.DataFrame, pd.DataFrame):\n \"\"\"Creates split data by treatment column.\n\n Separate treatment column with 1 (treated) an 0 (untreated),\n scales and transforms treatment column\n\n Args:\n df:\n The input dataframe\n\n Returns:\n Tuple of dataframes - one for treated (df[self.treatment] == 1]) and\n one for untreated (df[self.treatment] == 0]). Drops self.outcomes and\n `self.treatment` columns\n\n \"\"\"\n logger.debug(\"Creating split data by treatment column\")\n\n treated = df[df[self.treatment] == 1].drop([self.treatment] + self.outcomes, axis=1)\n untreated = df[df[self.treatment] == 0].drop([self.treatment] + self.outcomes, axis=1)\n\n return treated, untreated\n\n def _predict_outcome(self, std_treated: pd.DataFrame, std_untreated: pd.DataFrame):\n \"\"\"Applies LinearRegression to input arrays.\n\n Calculate biases of treated and untreated values,\n creates dict of y - regular, matched and without bias.\n\n Args:\n std_treated:\n The dataframe of treated data\n std_untreated:\n The dataframe of untreated data\n\n \"\"\"\n logger.debug(\"Predicting target by Linear Regression\")\n\n start_time = dt.datetime.now()\n logger.debug(\"start --\")\n\n self.dict_outcome_untreated = {}\n self.dict_outcome_treated = {}\n df = self.df.drop(columns=self.info_col)\n\n for outcome in self.outcomes:\n y_untreated = df[df[self.treatment] == 0][outcome].to_numpy()\n y_treated = df[df[self.treatment] == 1][outcome].to_numpy()\n\n x_treated = std_treated.to_numpy()\n x_untreated = std_untreated.to_numpy()\n y_match_treated = np.array([y_untreated[idx].mean() for idx in self.treated_index])\n y_match_untreated = np.array([y_treated[idx].mean() for idx in self.untreated_index])\n x_match_treated = np.array([x_untreated[idx].mean(0) for idx in self.treated_index])\n x_match_untreated = np.array([x_treated[idx].mean(0) for idx in self.untreated_index])\n bias_coefs_c = bias_coefs(self.untreated_index, y_treated, x_treated)\n bias_coefs_t = bias_coefs(self.treated_index, y_untreated, x_untreated)\n bias_c = bias(x_untreated, x_match_untreated, bias_coefs_c)\n bias_t = bias(x_treated, x_match_treated, bias_coefs_t)\n\n y_match_treated_bias = y_treated - y_match_treated + bias_t\n y_match_untreated_bias = y_match_untreated - y_untreated - bias_c\n\n self.dict_outcome_untreated[outcome] = y_untreated\n self.dict_outcome_untreated[outcome + POSTFIX] = y_match_untreated\n self.dict_outcome_untreated[outcome + POSTFIX_BIAS] = y_match_untreated_bias\n\n self.dict_outcome_treated[outcome] = y_treated\n self.dict_outcome_treated[outcome + POSTFIX] = y_match_treated\n self.dict_outcome_treated[outcome + POSTFIX_BIAS] = y_match_treated_bias\n\n end_time = dt.datetime.now()\n total = dt.datetime.strptime(str(end_time - start_time), \"%H:%M:%S.%f\").strftime(\"%H:%M:%S\")\n logger.debug(f\"end -- [work time{total}]\")\n\n def _create_outcome_matched_df(self, dict_outcome: dict, is_treated: bool) -> pd.DataFrame:\n \"\"\"Creates dataframe with outcomes values and treatment.\n\n Args:\n dict_outcome:\n A dictionary containing outcomes\n is_treated:\n A boolean value indicating whether the outcome is treated or not\n\n Returns:\n A dataframe with matched outcome and treatment columns\n\n \"\"\"\n df_pred = pd.DataFrame(dict_outcome)\n df_pred[self.treatment] = int(is_treated)\n df_pred[self.treatment + 
POSTFIX] = int(not is_treated)\n\n return df_pred\n\n def _create_features_matched_df(self, index: np.ndarray, is_treated: bool) -> pd.DataFrame:\n \"\"\"Creates matched dataframe with features.\n\n Args:\n index:\n An array of indices\n is_treated:\n A boolean value indicating whether the outcome is treated or not\n\n\n Returns:\n A dataframe of matched features\n\n \"\"\"\n df = self.df.drop(columns=self.outcomes + self.info_col)\n\n if self.group_col is None:\n untreated_index = df[df[self.treatment] == int(not is_treated)].index.to_numpy()\n converted_index = [untreated_index[i] for i in index]\n filtered = df.loc[df[self.treatment] == int(not is_treated)].values\n untreated_df = pd.DataFrame(\n data=np.array([filtered[idx].mean(axis=0) for idx in index]), columns=df.columns\n ) # добавить дату в данные и пофиксить баги с этим (тут ломалось)\n if self.info_col is not None and len(self.info_col) != 1:\n untreated_df[\"index\"] = pd.Series(converted_index)\n treated_df = df[df[self.treatment] == int(is_treated)].reset_index()\n else:\n ids = self.df[df[self.treatment] == int(not is_treated)][self.info_col].values.ravel()\n converted_index = [ids[i] for i in index]\n untreated_df[\"index\"] = pd.Series(converted_index)\n treated_df = df[df[self.treatment] == int(is_treated)].reset_index()\n treated_df[\"index\"] = self.df[self.df[self.treatment] == int(is_treated)][self.info_col].values.ravel()\n else:\n df = df.sort_values([self.treatment, self.group_col])\n untreated_index = df[df[self.treatment] == int(not is_treated)].index.to_numpy()\n converted_index = [untreated_index[i] for i in index]\n filtered = df.loc[df[self.treatment] == int(not is_treated)]\n cols_untreated = [col for col in filtered.columns if col != self.group_col]\n filtered = filtered.drop(columns=self.group_col).to_numpy()\n untreated_df = pd.DataFrame(\n data=np.array([filtered[idx].mean(axis=0) for idx in index]), columns=cols_untreated\n )\n treated_df = df[df[self.treatment] == int(is_treated)].reset_index()\n grp = treated_df[self.group_col]\n untreated_df[self.group_col] = grp\n if self.info_col is not None and len(self.info_col) != 1:\n untreated_df[\"index\"] = pd.Series(converted_index)\n else:\n ids = (\n self.df[df[self.treatment] == int(not is_treated)]\n .sort_values([self.treatment, self.group_col])[self.info_col]\n .values.ravel()\n )\n converted_index = [ids[i] for i in index]\n untreated_df[\"index\"] = pd.Series(converted_index)\n treated_df[\"index\"] = self.df[self.df[self.treatment] == int(is_treated)][self.info_col].values.ravel()\n untreated_df.columns = [col + POSTFIX for col in untreated_df.columns]\n\n x = pd.concat([treated_df, untreated_df], axis=1).drop(\n columns=[self.treatment, self.treatment + POSTFIX], axis=1\n )\n return x\n\n def _create_matched_df(self) -> pd.DataFrame:\n \"\"\"Creates matched df of features and outcome.\n\n Returns:\n Matched dataframe\n \"\"\"\n df_pred_treated = self._create_outcome_matched_df(self.dict_outcome_treated, True)\n df_pred_untreated = self._create_outcome_matched_df(self.dict_outcome_untreated, False)\n\n df_matched = pd.concat([df_pred_treated, df_pred_untreated])\n\n treated_x = self._create_features_matched_df(self.treated_index, True)\n untreated_x = self._create_features_matched_df(self.untreated_index, False)\n\n untreated_x = pd.concat([treated_x, untreated_x])\n\n columns = list(untreated_x.columns) + list(df_matched.columns)\n\n df_matched = pd.concat([untreated_x, df_matched], axis=1, ignore_index=True)\n df_matched.columns = columns\n\n 
return df_matched\n\n def calc_atc(self, df: pd.DataFrame, outcome: str) -> tuple:\n \"\"\"Calculates Average Treatment Effect for the control group (ATC).\n\n Effect on control group if it was affected\n\n Args:\n df:\n Input dataframe\n outcome:\n The outcome to be considered for treatment effect\n\n Returns:\n Contains ATC, scaled counts, and variances as numpy arrays\n\n \"\"\"\n logger.debug(\"Calculating ATC\")\n\n df = df[df[self.treatment] == 0]\n N_c = len(df)\n ITT_c = df[outcome + POSTFIX_BIAS]\n scaled_counts_c = scaled_counts(N_c, self.treated_index, self.silent)\n\n vars_c = np.repeat(ITT_c.var(), N_c) # conservative\n atc = ITT_c.mean()\n\n return atc, scaled_counts_c, vars_c\n\n def calc_att(self, df: pd.DataFrame, outcome: str) -> tuple:\n \"\"\"Calculates Average Treatment Effect for the treated (ATT).\n\n Args:\n df:\n Input dataframe\n outcome:\n The outcome to be considered for treatment effect\n\n Returns:\n Contains ATT, scaled counts, and variances as numpy arrays\n\n \"\"\"\n logger.debug(\"Calculating ATT\")\n\n df = df[df[self.treatment] == 1]\n N_t = len(df)\n ITT_t = df[outcome + POSTFIX_BIAS]\n scaled_counts_t = scaled_counts(N_t, self.untreated_index, self.silent)\n\n vars_t = np.repeat(ITT_t.var(), N_t) # conservative\n att = ITT_t.mean()\n\n return att, scaled_counts_t, vars_t\n\n def _calculate_ate_all_target(self, df: pd.DataFrame):\n \"\"\"Creates dictionaries of all effect: ATE, ATC, ATT.\n\n Args:\n df:\n Input dataframe\n\n \"\"\"\n logger.debug(\"Creating dicts of all effects: ATE, ATC, ATT\")\n\n att_dict = {}\n atc_dict = {}\n ate_dict = {}\n N = len(df)\n N_t = df[self.treatment].sum()\n N_c = N - N_t\n\n for outcome in self.outcomes:\n att, scaled_counts_t, vars_t = self.calc_att(df, outcome)\n atc, scaled_counts_c, vars_c = self.calc_atc(df, outcome)\n ate = (N_c / N) * atc + (N_t / N) * att\n\n att_se = calc_att_se(vars_c, vars_t, scaled_counts_c)\n atc_se = calc_atc_se(vars_c, vars_t, scaled_counts_t)\n ate_se = calc_ate_se(vars_c, vars_t, scaled_counts_c, scaled_counts_t)\n\n ate_dict[outcome] = [\n ate,\n ate_se,\n pval_calc(ate / ate_se),\n ate - self.sigma * ate_se,\n ate + self.sigma * ate_se,\n ]\n atc_dict[outcome] = [\n atc,\n atc_se,\n pval_calc(atc / atc_se),\n atc - self.sigma * atc_se,\n atc + self.sigma * atc_se,\n ]\n att_dict[outcome] = [\n att,\n att_se,\n pval_calc(att / att_se),\n att - self.sigma * att_se,\n att + self.sigma * att_se,\n ]\n\n self.ATE, self.ATC, self.ATT = ate_dict, atc_dict, att_dict\n self.val_dict = ate_dict\n\n def matching_quality(self, df_matched) -> Dict[str, Union[Dict[str, float], float]]:\n \"\"\"Estimated the quality of covariates balance and repeat fraction.\n\n Calculates population stability index,Standardized mean difference\n and Kolmogorov-Smirnov test for numeric values. 
Returns a dictionary of reports.\n\n Args:\n df_matched:\n Matched DataFrame to calculate quality\n\n Returns:\n dictionary containing PSI, KS-test, SMD data and repeat fractions\n\n \"\"\"\n if self.silent:\n logger.debug(\"Estimating quality of matching\")\n else:\n logger.info(\"Estimating quality of matching\")\n\n psi_columns = set(self.columns_match)\n psi_columns = list(psi_columns - set([self.treatment] + self.outcomes))\n psi_data, ks_data, smd_data = matching_quality(\n df_matched, self.treatment, sorted(self.features_quality), sorted(psi_columns), self.silent\n )\n\n rep_dict = {\n \"match_control_to_treat\": check_repeats(np.concatenate(self.treated_index), silent=self.silent),\n \"match_treat_to_control\": check_repeats(np.concatenate(self.untreated_index), silent=self.silent),\n }\n\n self.quality_dict = {\"psi\": psi_data, \"ks_test\": ks_data, \"smd\": smd_data, \"repeats\": rep_dict}\n\n rep_df = pd.DataFrame.from_dict(rep_dict, orient=\"index\").rename(columns={0: \"value\"})\n self.rep_dict = rep_df\n\n if self.silent:\n logger.debug(f\"PSI info: \\n {psi_data.head(10)} \\nshape:{psi_data.shape}\")\n logger.debug(f\"Kolmogorov-Smirnov test info: \\n {ks_data.head(10)} \\nshape:{ks_data.shape}\")\n logger.debug(f\"Standardised mean difference info: \\n {smd_data.head(10)} \\nshape:{smd_data.shape}\")\n logger.debug(f\"Repeats info: \\n {rep_df.head(10)}\")\n else:\n logger.info(f\"PSI info: \\n {psi_data.head(10)} \\nshape:{psi_data.shape}\")\n logger.info(f\"Kolmogorov-Smirnov test info: \\n {ks_data.head(10)} \\nshape:{ks_data.shape}\")\n logger.info(f\"Standardised mean difference info: \\n {smd_data.head(10)} \\nshape:{smd_data.shape}\")\n logger.info(f\"Repeats info: \\n {rep_df.head(10)}\")\n\n return self.quality_dict\n\n def group_match(self):\n \"\"\"Matches the dataframe if it divided by groups.\n\n Returns:\n A tuple containing the matched dataframe and metrics such as ATE, ATT and ATC\n\n \"\"\"\n df = self.df.drop(columns=self.info_col)\n groups = sorted(df[self.group_col].unique())\n matches_c = []\n matches_t = []\n group_arr_c = df[df[self.treatment] == 0][self.group_col].to_numpy()\n group_arr_t = df[df[self.treatment] == 1][self.group_col].to_numpy()\n treat_arr_c = df[df[self.treatment] == 0][self.treatment].to_numpy()\n treat_arr_t = df[df[self.treatment] == 1][self.treatment].to_numpy()\n\n if self.pbar:\n self.tqdm = tqdm(total=len(groups) * 2)\n\n for group in groups:\n df_group = df[df[self.group_col] == group]\n temp = df_group[self.columns_match + [self.group_col]]\n temp = temp.loc[:, (temp != 0).any(axis=0)].drop(columns=self.group_col)\n treated, untreated = self._get_split(temp)\n\n std_treated_np, std_untreated_np = _transform_to_np(treated, untreated, self.weights)\n\n if self.pbar:\n self.tqdm.set_description(desc=f\"Get untreated index by group {group}\")\n matches_u_i = _get_index(std_treated_np, std_untreated_np, self.n_neighbors)\n\n if self.pbar:\n self.tqdm.update(1)\n self.tqdm.set_description(desc=f\"Get treated index by group {group}\")\n matches_t_i = _get_index(std_untreated_np, std_treated_np, self.n_neighbors)\n if self.pbar:\n self.tqdm.update(1)\n self.tqdm.refresh()\n\n group_mask_c = group_arr_c == group\n group_mask_t = group_arr_t == group\n matches_c_mask = np.arange(treat_arr_t.shape[0])[group_mask_t]\n matches_u_i = [matches_c_mask[i] for i in matches_u_i]\n matches_t_mask = np.arange(treat_arr_c.shape[0])[group_mask_c]\n matches_t_i = [matches_t_mask[i] for i in matches_t_i]\n matches_c.extend(matches_u_i)\n 
matches_t.extend(matches_t_i)\n\n if self.pbar:\n self.tqdm.close()\n\n self.untreated_index = matches_c\n self.treated_index = matches_t\n\n df_group = df[self.columns_match].drop(columns=self.group_col)\n treated, untreated = self._get_split(df_group)\n self._predict_outcome(treated, untreated)\n df_matched = self._create_matched_df()\n self._calculate_ate_all_target(df_matched)\n\n if self.validation:\n return self.val_dict\n\n return self.report_view(), df_matched\n\n def match(self):\n \"\"\"Matches the dataframe.\n\n Returns:\n A tuple containing the matched dataframe and metrics such as ATE, ATT and ATC\n\n \"\"\"\n if self.group_col is not None:\n return self.group_match()\n\n df = self.df[self.columns_match]\n treated, untreated = self._get_split(df)\n\n std_treated_np, std_untreated_np = _transform_to_np(treated, untreated, self.weights)\n\n if self.pbar:\n self.tqdm = tqdm(total=len(std_treated_np) + len(std_untreated_np))\n self.tqdm.set_description(desc=\"Get untreated index\")\n\n untreated_index = _get_index(std_treated_np, std_untreated_np, self.n_neighbors)\n\n if self.pbar:\n self.tqdm.update(len(std_treated_np))\n self.tqdm.set_description(desc=\"Get treated index\")\n treated_index = _get_index(std_untreated_np, std_treated_np, self.n_neighbors)\n\n if self.pbar:\n self.tqdm.update(len(std_untreated_np))\n self.tqdm.refresh()\n self.tqdm.close()\n\n self.untreated_index = untreated_index\n self.treated_index = treated_index\n\n self._predict_outcome(treated, untreated)\n\n df_matched = self._create_matched_df()\n self._calculate_ate_all_target(df_matched)\n\n if self.validation:\n return self.val_dict\n\n return self.report_view(), df_matched\n\n def report_view(self) -> pd.DataFrame:\n \"\"\"Formats the ATE, ATC, and ATT results into a Pandas DataFrame for easy viewing.\n\n Returns:\n DataFrame containing ATE, ATC, and ATT results\n \"\"\"\n result = (self.ATE, self.ATC, self.ATT)\n\n for outcome in self.outcomes:\n res = pd.DataFrame(\n [x[outcome] + [outcome] for x in result],\n columns=[\"effect_size\", \"std_err\", \"p-val\", \"ci_lower\", \"ci_upper\", \"outcome\"],\n index=[\"ATE\", \"ATC\", \"ATT\"],\n )\n self.results = pd.concat([self.results, res])\n return self.results" }, { "identifier": "MatcherNoReplacement", "path": "hypex/algorithms/no_replacement_matching.py", "snippet": "class MatcherNoReplacement:\n \"\"\"Matching groups with no replacement.\n\n Realized by optimizing the linear sum of distances between pairs of treatment and\n control samples.\n \"\"\"\n\n def __init__(self, X: pd.DataFrame, a: pd.Series, weights: dict = None, approximate_match: bool = False):\n \"\"\"Initialize matching.\n\n Args:\n X: features dataframe\n a: series of treatment value\n weights: weights for numeric columns in order to increase matching quality.\n approximate_match: use or not approximate matching\n \"\"\"\n self.treatment = a\n self.X = X\n self.weights = weights\n self.approximate_match = approximate_match\n\n def match(self):\n \"\"\"Function run matching with no replacement.\n\n Returns:\n Dataframe of matched indexes.\n \"\"\"\n matches = {}\n cov = conditional_covariance(self.X[self.treatment == 1].values, self.X[self.treatment == 0].values)\n distance_matrix = self._get_distance_matrix(self.X[self.treatment == 1], self.X[self.treatment == 0], cov)\n source_array, neighbor_array_indices, distances = optimally_match_distance_matrix(distance_matrix)\n source_df = self.X[self.treatment == 1].iloc[np.array(source_array)]\n target_df = self.X[self.treatment == 
0].iloc[np.array(neighbor_array_indices)]\n\n matches[1] = self.create_match_df(self.treatment, source_df, target_df, distances)\n matches[0] = self.create_match_df(self.treatment, target_df, source_df, distances)\n\n match_df = pd.concat(matches, sort=True)\n return match_df\n\n def create_match_df(\n self, base_series: pd.Series, source_df: pd.DataFrame, target_df: pd.DataFrame, distances: list\n ) -> pd.DataFrame:\n \"\"\"Function creates matching dataframe.\n\n Args:\n base_series: series of treatment value.\n source_df: dataframe of sources indexes.\n target_df: dataframe of target indexes.\n distances: matrix of calculated distances.\n\n Returns:\n Matched dataframe of indexes.\n \"\"\"\n match_sub_df = pd.DataFrame(\n index=base_series.index,\n columns=[\n \"matches\",\n \"distances\",\n ],\n data=base_series.apply(lambda x: pd.Series([[], []])).values,\n dtype=\"object\",\n )\n\n # matching from source to target: read distances\n match_sub_df.loc[source_df.index] = pd.DataFrame(\n data=dict(\n matches=[[tidx] for tidx in target_df.index],\n distances=distances,\n ),\n index=source_df.index,\n )\n\n # matching from target to target: fill with zeros\n match_sub_df.loc[target_df.index] = pd.DataFrame(\n data=dict(\n matches=[[tidx] for tidx in target_df.index],\n distances=[[0]] * len(distances),\n ),\n index=target_df.index,\n )\n return match_sub_df\n\n def _get_metric_dict(self, cov: np.ndarray) -> dict:\n \"\"\"Function calculates correct feature space and generate metrics dist for cdist calculation.\n\n Args:\n cov: Matrix of covariations.\n\n Returns:\n Metric dictionary\n \"\"\"\n metric_dict = dict(metric=\"mahalanobis\")\n mahalanobis_transform = np.linalg.inv(cov)\n if self.weights is not None:\n features = self.X.columns\n w_list = np.array([self.weights[col] if col in self.weights.keys() else 1 for col in features])\n w_matrix = np.sqrt(np.diag(w_list / w_list.sum()))\n mahalanobis_transform = np.dot(w_matrix, mahalanobis_transform)\n\n metric_dict[\"VI\"] = mahalanobis_transform\n return metric_dict\n\n def _get_distance_matrix(self, source_df: pd.DataFrame, target_df: pd.DataFrame, cov: np.ndarray) -> np.ndarray:\n \"\"\"Create distance matrix for no replacement match.\n\n Combines metric and source/target data into a\n precalculated distance matrix which can be passed to\n scipy.optimize.linear_sum_assignment.\n\n Args:\n source_df: source feature dataframe.\n target_df: target feature dataframe.\n cov: matrix of covariations.\n\n Returns:\n Matrix of distances.\n \"\"\"\n cdist_args = dict(XA=_ensure_array_columnlike(source_df.values), XB=_ensure_array_columnlike(target_df.values))\n cdist_args.update(self._get_metric_dict(cov))\n\n if self.approximate_match:\n if len(cdist_args['XB']) < len(cdist_args['XA']):\n covariance_matrix = np.cov(cdist_args['XB'].T)\n else:\n covariance_matrix = np.cov(cdist_args['XA'].T)\n covariance_matrix_reg = covariance_matrix + np.eye(covariance_matrix.shape[0]) * 1e-8\n\n distance_matrix = np.zeros((cdist_args['XA'].shape[0], cdist_args['XB'].shape[0]))\n for i, x in enumerate(cdist_args['XA']):\n distance_matrix[i] = _m_distance(cdist_args['XB'], x, np.linalg.inv(covariance_matrix_reg))\n else:\n distance_matrix = distance.cdist(**cdist_args)\n return distance_matrix" }, { "identifier": "FeatureSelector", "path": "hypex/selectors/feature_selector.py", "snippet": "class FeatureSelector:\n \"\"\"Class of LAMA Feature selector. Select top features. 
By default, use LGM.\n # TODO: write some feature selector\"\"\"\n\n def __init__(\n self,\n outcome: str,\n outcome_type: str,\n treatment: str,\n timeout: int,\n n_threads: int,\n n_folds: int,\n verbose: bool, # не используется\n generate_report: bool,\n report_dir: str,\n use_algos: List[str],\n ):\n \"\"\"Initialize the LamaFeatureSelector.\n\n Args:\n outcome:\n The target column\n outcome_type:\n The type of target column\n treatment:\n The column that determines control and test groups\n timeout:\n Time limit for the execution of the code\n n_threads:\n Maximum number of threads to be used\n n_folds:\n Number of folds for cross-validation\n verbose:\n Flag to control the verbosity of the process stages\n generate_report:\n Flag to control whether to create a report or not\n report_dir:\n Directory for storing report files\n use_algos:\n List of names of LAMA algorithms for feature selection\n \"\"\"\n self.outcome = outcome\n self.outcome_type = outcome_type\n self.treatment = treatment\n self.use_algos = use_algos\n self.timeout = timeout\n self.n_threads = n_threads\n self.n_folds = n_folds\n self.verbose = verbose\n self.generate_report = generate_report\n self.report_dir = report_dir\n\n def perform_selection(self, df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Trains a model and returns feature scores.\n\n This method defines metrics, applies the model, creates a report, and returns feature scores\n\n Args:\n df:\n Input data\n\n Returns:\n A DataFrame containing the feature scores from the model\n\n \"\"\"\n roles = {\n \"target\": self.outcome,\n \"drop\": [self.treatment],\n }\n\n if self.outcome_type == \"numeric\":\n task_name = \"reg\"\n loss = \"mse\"\n metric = \"mse\"\n elif self.outcome_type == \"binary\":\n task_name = \"binary\"\n loss = \"logloss\"\n metric = \"logloss\"\n else:\n task_name = \"multiclass\"\n loss = \"crossentropy\"\n metric = \"crossentropy\"\n\n features_scores = []\n\n return features_scores" }, { "identifier": "SpearmanFilter", "path": "hypex/selectors/spearman_filter.py", "snippet": "class SpearmanFilter:\n \"\"\"Class to filter columns based on the Spearman correlation coefficient.\n\n The class is utilized to filter dataframe columns that do not exhibit a significant\n correlation (based on a provided threshold) with a specified outcome column.\n The significance of the correlation is determined using the Spearman correlation coefficient\n and a p-value threshold of 0.05\n \"\"\"\n\n def __init__(self, outcome: str, treatment: str, threshold: float):\n \"\"\"Initialize spearman filter.\n\n Args:\n outcome:\n The name of target column\n treatment:\n The name of the column that determines control and test groups\n threshold:\n The threshold for the Spearman correlation coefficient filter\n \"\"\"\n self.outcome: str = outcome\n self.treatment: str = treatment\n self.threshold: float = threshold\n\n def perform_filter(self, df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Filters columns based on their correlation with the outcome column.\n\n The method tests the correlation using the Spearman correlation coefficient.\n Columns that have an absolute correlation coefficient value less than the provided threshold,\n and a p-value less than 0.05, are considered insignificant and are removed from the dataframe\n\n Args:\n df:\n The input DataFrame\n\n Returns:\n The filtered DataFrame, containing only columns that\n are significantly correlated with the outcome column\n \"\"\"\n selected = []\n columns = df.drop([self.treatment, self.outcome], 1).columns\n 
for column in columns:\n result = spearmanr(df[self.outcome].values, df[column].values)\n if (abs(result[0] < self.threshold)) and (result[1] < PVALUE):\n selected.append(column)\n\n logger.info(f\"Drop columns {list(set(columns) - set(selected))}\")\n\n columns = selected + [self.treatment, self.outcome]\n df = df[columns]\n\n return df" }, { "identifier": "OutliersFilter", "path": "hypex/selectors/outliers_filter.py", "snippet": "class OutliersFilter:\n \"\"\"Class of Outliers Filter. It creates a row indices that should be deleted by percentile.\"\"\"\n\n def __init__(self, interquartile_coeff, mode_percentile, min_percentile, max_percentile):\n \"\"\"Initializes the OutliersFilter.\n\n Args:\n interquartile_coeff:\n Coefficient for the interquartile range to determine outliers\n mode_percentile:\n If True, outliers are determined by custom percentiles\n min_percentile:\n The lower percentile. Values below this percentile are considered outliers.\n max_percentile:\n The upper percentile. Values above this percentile are considered outliers\n \"\"\"\n self.interquartile_coeff = interquartile_coeff\n self.mode_percentile = mode_percentile\n self.min_percentile = min_percentile\n self.max_percentile = max_percentile\n\n def perform_filter(self, df: pd.DataFrame, interquartile: bool = True) -> pd.DataFrame:\n \"\"\"Identifies rows with outliers.\n\n This method creates a set of row indices to be removed, which contains values less than\n `min_percentile` and larger than `max_percentile` (if `mode_percentile` is True), or values\n smaller than the 0.2 and larget than 0.8 (if `mode_percentile` is False)\n\n Args:\n df:\n The input DataFrame\n interquartile:\n If True, uses the interquartile range to determine outliers. Defaults to True\n\n Returns:\n The set of row indices with outliers\n \"\"\"\n columns_names = df.select_dtypes(include=\"number\").columns\n rows_for_del = []\n for column in columns_names:\n if self.mode_percentile:\n min_value = df[column].quantile(self.min_percentile)\n max_value = df[column].quantile(self.max_percentile)\n elif interquartile:\n upper_quantile = df[column].quantile(0.8)\n lower_quantile = df[column].quantile(0.2)\n\n interquartile_range = upper_quantile - lower_quantile\n min_value = lower_quantile - self.interquartile_coeff * interquartile_range\n max_value = upper_quantile + self.interquartile_coeff * interquartile_range\n else:\n mean_value = df[column].mean()\n standard_deviation = df[column].std()\n nstd_lower, nstd_upper = 3, 3\n\n min_value = mean_value - nstd_lower * standard_deviation\n max_value = mean_value + nstd_upper * standard_deviation\n\n rows_for_del_column = (df[column] < min_value) | (df[column] > max_value)\n rows_for_del_column = df.index[rows_for_del_column].tolist()\n rows_for_del.extend(rows_for_del_column)\n rows_for_del = list(set(rows_for_del))\n logger.info(f\"Drop {len(rows_for_del)} rows\")\n return df.drop(rows_for_del)" }, { "identifier": "const_filtration", "path": "hypex/selectors/base_filtration.py", "snippet": "def const_filtration(X: pd.DataFrame, threshold: float = 0.95) -> list:\n \"\"\"Function removes features consist of constant value on 95%.\n\n Args:\n X: related dataset\n threshold: constant fill rate, default is 0.95\n\n Returns:\n List of filtered columns\n \"\"\"\n is_const = pd.Series(0, index=X.columns, dtype=np.dtype(bool))\n for col in X.columns:\n # NaNs are not counted using unique (since np.nan != np.nan). 
Fill them with a unique value:\n cur_col = X.loc[:, col]\n cur_col.loc[~np.isfinite(cur_col)] = cur_col.max() + 1\n # Get values' frequency:\n freqs = cur_col.value_counts(normalize=True)\n is_const[col] = np.any(freqs > threshold)\n\n selected_features = ~is_const\n if np.sum(selected_features) == 0:\n raise AssertionError(\"All features were removed by constant filtration.\")\n else:\n return X.loc[:, selected_features].columns.to_list()" }, { "identifier": "nan_filtration", "path": "hypex/selectors/base_filtration.py", "snippet": "def nan_filtration(X: pd.DataFrame, threshold: float = 0.8):\n \"\"\"Function removes features consist of NaN value on 80%.\n\n Args:\n X: related dataset\n threshold: constant fill rate, default is 0.95\n\n Returns:\n List of filtered columns\n \"\"\"\n nan_freqs = np.mean(pd.isnull(X), axis=0)\n is_sparse = nan_freqs > threshold\n selected_features = ~is_sparse\n if np.sum(selected_features) == 0:\n raise AssertionError(\"All features were removed by nan filtration.\")\n else:\n return X.loc[:, selected_features].columns.to_list()" }, { "identifier": "random_feature", "path": "hypex/utils/validators.py", "snippet": "def random_feature(df: pd.DataFrame):\n \"\"\"Adds a random feature to the initial dataset.\n\n Args:\n df:\n The initial dataframe\n\n Returns:\n The modified dataframe with an additional random feature\n A validation flag\n \"\"\"\n feature = np.random.normal(0, 1, size=len(df))\n validate = 1\n df[\"random_feature\"] = feature\n return df, validate" }, { "identifier": "random_treatment", "path": "hypex/utils/validators.py", "snippet": "def random_treatment(df: pd.DataFrame, treatment: str):\n \"\"\"Replaces real treatment with a random placebo treatment.\n\n Args:\n df:\n The initial dataframe\n treatment:\n The columns name representing the treatment\n\n Returns:\n The modified dataframe with the original treatment replaced\n The original treatment series\n A validation flag\n \"\"\"\n prop1 = df[treatment].sum() / df.shape[0]\n prop0 = 1 - prop1\n new_treatment = np.random.choice([0, 1], size=df.shape[0], p=[prop0, prop1])\n validate = 1\n orig_treatment = df[treatment]\n df = df.drop(columns=treatment)\n df[treatment] = new_treatment\n return df, orig_treatment, validate" }, { "identifier": "subset_refuter", "path": "hypex/utils/validators.py", "snippet": "def subset_refuter(df: pd.DataFrame, treatment: str, fraction: float = 0.8):\n \"\"\"Returns a subset of data with given fraction (default 0.8).\n\n Args:\n df:\n The initial dataframe\n treatment:\n The column name representing the treatment\n fraction:\n The fraction of the dataset to divide random matching\n\n Returns:\n The subset of the dataframe\n A validation flag\n \"\"\"\n df = df.groupby(treatment, group_keys=False).apply(lambda x: x.sample(frac=fraction))\n validate = 1\n return df, validate" }, { "identifier": "test_significance", "path": "hypex/utils/validators.py", "snippet": "def test_significance(estimate: float, simulations: List) -> float:\n \"\"\"Performs a significance test for a normal distribution.\n\n Args:\n estimate:\n The estimated effect\n simulations:\n A list of estimated effects from each simulation\n\n Returns:\n The p-value of the test\n \"\"\"\n mean_refute_value = np.mean(simulations)\n std_dev_refute_values = np.std(simulations)\n z_score = (estimate - mean_refute_value) / std_dev_refute_values\n\n if z_score > 0: # Right Tail\n p_value = 1 - st.norm.cdf(z_score)\n else: # Left Tail\n p_value = st.norm.cdf(z_score)\n\n return p_value" } ]
import logging import pickle import numpy as np import pandas as pd from typing import Union from tqdm.auto import tqdm from .algorithms.faiss_matcher import FaissMatcher from .algorithms.no_replacement_matching import MatcherNoReplacement from .selectors.feature_selector import FeatureSelector from .selectors.spearman_filter import SpearmanFilter from .selectors.outliers_filter import OutliersFilter from .selectors.base_filtration import const_filtration, nan_filtration from .utils.validators import random_feature from .utils.validators import random_treatment from .utils.validators import subset_refuter from .utils.validators import test_significance
12,854
if self.info_col is not None: df = df.drop(columns=self.info_col) features = feat_select.perform_selection(df=df) if self.group_col is None: self.features_importance = features else: self.features_importance = features.append( {"Feature": self.group_col, "Importance": features.Importance.max()}, ignore_index=True ) return self.features_importance.sort_values("Importance", ascending=False) def _create_faiss_matcher(self, df=None, validation=None): """Creates a FaissMatcher object. Args: df: The dataframe to use. If None, uses self.input_data. validation: Whether to use the matcher for validation. If None, determines based on whether """ if df is None: df = self.input_data self.matcher = FaissMatcher( df, self.outcomes, self.treatment, info_col=self.info_col, weights=self.weights, features=self.features_importance, group_col=self.group_col, validation=validation, n_neighbors=self.n_neighbors, pbar=False if validation else self.pbar, ) def _perform_validation(self): """Performs validation using the FaissMatcher.""" if self.group_col is None: sim = self.matcher.match() else: sim = self.matcher.group_match() for key in self.val_dict.keys(): self.val_dict[key].append(sim[key][0]) def _log(self, message, silent=None): """Logs a message at the appropriate level. Args: message: The message to log. silent: If silent, logs will be only info """ if silent is None: silent = self.silent if silent: logger.debug(message) else: logger.info(message) def _matching(self) -> tuple: """Performs matching considering the presence of groups. Returns: Results of matching and matching quality metrics """ self._create_faiss_matcher() self._log("Applying matching") self.results, df_matched = self.matcher.match() self.quality_result = self.matcher.matching_quality(df_matched) return self.results, self.quality_result, df_matched def validate_result( self, refuter: str = "random_feature", effect_type: str = "ate", n_sim: int = 10, fraction: float = 0.8 ) -> dict: """Validates estimated ATE (Average Treatment Effect). Validates estimated effect: 1) by replacing real treatment with random placebo treatment. Estimated effect must be droped to zero, p-val > 0.05; 2) by adding random feature (`random_feature`). Estimated effect shouldn't change significantly, p-val < 0.05; 3) estimates effect on subset of data (default fraction is 0.8). Estimated effect shouldn't change significantly, p-val < 0.05. Args: refuter: Refuter type (`random_treatment`, `random_feature`, `subset_refuter`) effect_type: Which effect to validate (`ate`, `att`, `atc`) n_sim: Number of simulations fraction: Subset fraction for subset refuter only Returns: Dictionary of outcome_name (mean_effect on validation, p-value) """ if self.silent: logger.debug("Applying validation of result") else: logger.info("Applying validation of result") self.val_dict = {k: [] for k in self.outcomes} self.pval_dict = dict() effect_dict = {"ate": 0, "atc": 1, "att": 2} assert effect_type in effect_dict.keys() for i in tqdm(range(n_sim)): if refuter in ["random_treatment", "random_feature"]: if refuter == "random_treatment": self.input_data, orig_treatment, self.validate = random_treatment(self.input_data, self.treatment) elif refuter == "random_feature":
"""Base Matcher class.""" REPORT_FEAT_SELECT_DIR = "report_feature_selector" REPORT_PROP_MATCHER_DIR = "report_matcher" NAME_REPORT = "lama_interactive_report.html" N_THREADS = 1 N_FOLDS = 4 RANDOM_STATE = 123 TEST_SIZE = 0.2 TIMEOUT = 600 VERBOSE = 2 USE_ALGOS = ["lgb"] PROP_SCORES_COLUMN = "prop_scores" GENERATE_REPORT = True SAME_TARGET_THRESHOLD = 0.7 OUT_INTER_COEFF = 1.5 OUT_MODE_PERCENT = True OUT_MIN_PERCENT = 0.02 OUT_MAX_PERCENT = 0.98 logger = logging.getLogger("hypex") console_out = logging.StreamHandler() logging.basicConfig( handlers=(console_out,), format="[%(asctime)s | %(name)s | %(levelname)s]: %(message)s", datefmt="%d.%m.%Y %H:%M:%S", level=logging.INFO, ) class Matcher: """Class for compile full pipeline of Matching in Causal Inference task. Matcher steps: - Read, analyze data - Feature selection via LightAutoML - Converting a dataset with features to another space via Cholesky decomposition In the new space, the distance L2 becomes equivalent to the Mahalanobis distance. This allows us to use faiss to search for nearest objects, which can search only by L2 metric, but without violating the methodology of matching, for which it is important to count by the Mahalanobis distance - Finding the nearest neighbors for each unit (with duplicates) using faiss. For each of the control group, neighbors from the target group are matched and vice versa. - Calculation bias - Creating matched df (Wide df with pairs) - Calculation metrics: ATE, ATT, ATC, p-value, and сonfidence intervals - Calculation quality: PS-test, KS test, SMD test - Returns metrics as dataframe, quality results as dict of df's and df_matched - After receiving the result, the result should be validated using :func:`~hypex.matcher.Matcher.validate_result` Example: Common usecase - base pipeline for matching >>> # Base info >>> treatment = "treatment" # Column name with info about 'treatment' 0 or 1 >>> target = "target" # Column name with target >>> >>> # Optional >>> info_col = ["user_id", 'address'] # Columns that will not participate in the match and are informative. >>> group_col = "CatCol" # Column name for strict comparison (for a categorical feature) >>> >>> # Matching >>> model = Matcher(data, outcome=target, treatment=treatment, info_col=info_col, group_col=group_col) >>> features = model.lama_feature_select() # Feature selection via lama >>> results, quality, df_matched = model.estimate(features=some_features) # Performs matching >>> >>> model.validate_result() """ def __init__( self, input_data: pd.DataFrame, treatment: str, outcome: Union[str, list] = None, outcome_type: str = "numeric", group_col: str = None, info_col: list = None, weights: dict = None, base_filtration: bool = False, generate_report: bool = GENERATE_REPORT, report_feat_select_dir: str = REPORT_FEAT_SELECT_DIR, timeout: int = TIMEOUT, n_threads: int = N_THREADS, n_folds: int = N_FOLDS, verbose: bool = VERBOSE, use_algos: list = None, same_target_threshold: float = SAME_TARGET_THRESHOLD, interquartile_coeff: float = OUT_INTER_COEFF, drop_outliers_by_percentile: bool = OUT_MODE_PERCENT, min_percentile: float = OUT_MIN_PERCENT, max_percentile: float = OUT_MAX_PERCENT, n_neighbors: int = 1, silent: bool = True, pbar: bool = True, ): """Initialize the Matcher object. Args: input_data: Input dataframe outcome: Target column treatment: Column determine control and test groups outcome_type: Values type of target column. Defaults to "numeric" group_col: Column for grouping. Defaults to None. 
info_col: Columns with id, date or metadata, not taking part in calculations. Defaults to None weights: weights for numeric columns in order to increase matching quality by weighted feature. By default, is None (all features have the same weight equal to 1). Example: {'feature_1': 10} base_filtration: To use or not base filtration of features in order to remove all constant or almost all constant, bool. Default is False. generate_report: Flag to create report. Defaults to True report_feat_select_dir: Folder for report files. Defaults to "report_feature_selector" timeout: Limit work time of code LAMA. Defaults to 600 n_threads: Maximum number of threads. Defaults to 1 n_folds: Number of folds for cross-validation. Defaults to 4 verbose: Flag to show process stages. Defaults to 2 use_algos: List of names of LAMA algorithms for feature selection. Defaults to ["lgb"] same_target_threshold: Threshold for correlation coefficient filter (Spearman). Default to 0.7 interquartile_coeff: Percent for drop outliers. Default to 1.5 drop_outliers_by_percentile: Flag to drop outliers by custom percentiles. Defaults to True min_percentile: Minimum percentile to drop outliers. Defaults to 0.02 max_percentile: Maximum percentile to drop outliers. Defaults to 0.98 n_neighbors: Number of neighbors to match (in fact you may see more then n matches as every match may have more then one neighbor with the same distance). Default value is 1. silent: Write logs in debug mode pbar: Display progress bar while get index """ if use_algos is None: use_algos = USE_ALGOS self.input_data = input_data if outcome is None: outcome = list() self.outcomes = outcome if type(outcome) == list else [outcome] self.treatment = treatment self.group_col = group_col self.info_col = info_col self.outcome_type = outcome_type self.weights = weights self.generate_report = generate_report self.report_feat_select_dir = report_feat_select_dir self.timeout = timeout self.n_threads = n_threads self.n_folds = n_folds self.verbose = verbose self.use_algos = use_algos self.same_target_threshold = same_target_threshold self.interquartile_coeff = interquartile_coeff self.mode_percentile = drop_outliers_by_percentile self.min_percentile = min_percentile self.max_percentile = max_percentile self.base_filtration = base_filtration self.features_importance = None self.matcher = None self.val_dict = None self.pval_dict = None self.new_treatment = None self.validate = None self.dropped_features = [] self.n_neighbors = n_neighbors self.silent = silent self.pbar = pbar self._preprocessing_data() def _convert_categorical_to_dummy(self): """Converts categorical variables to dummy variables. Returns: Data with categorical variables converted to dummy variables. 
""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col if columns_to_drop is not None: data = self.input_data.drop(columns=columns_to_drop) else: data = self.input_data dummy_data = pd.get_dummies(data, drop_first=True, dtype=np.uint8) return dummy_data def _preprocessing_data(self): """Converts categorical features into dummy variables.""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col + self.outcomes + [self.treatment] if self.base_filtration: filtered_features = nan_filtration(self.input_data.drop(columns=columns_to_drop)) self.dropped_features = [f for f in self.input_data.columns if f not in filtered_features + columns_to_drop] self.input_data = self.input_data[filtered_features + columns_to_drop] nan_counts = self.input_data.isna().sum().sum() if nan_counts != 0: self._log(f"Number of NaN values filled with zeros: {nan_counts}", silent=False) self.input_data = self.input_data.fillna(0) if self.group_col is not None: group_col = self.input_data[[self.group_col]] if self.info_col is not None: info_col = self.input_data[self.info_col] self.input_data = self._convert_categorical_to_dummy() if self.group_col is not None: self.input_data = pd.concat([self.input_data, group_col], axis=1) if self.info_col is not None: self.input_data = pd.concat([self.input_data, info_col], axis=1) if self.base_filtration: filtered_features = const_filtration(self.input_data.drop(columns=columns_to_drop)) self.dropped_features = np.concatenate( ( self.dropped_features, [f for f in self.input_data.columns if f not in filtered_features + columns_to_drop], ) ) self.input_data = self.input_data[filtered_features + columns_to_drop] self._log("Categorical features turned into dummy") def _apply_filter(self, filter_class, *filter_args): """Applies a filter to the input data. Args: filter_class: The class of the filter to apply. *filter_args: Arguments to pass to the filter class. """ filter_instance = filter_class(*filter_args) self.input_data = filter_instance.perform_filter(self.input_data) def _spearman_filter(self): """Applies a filter by dropping columns correlated with the outcome column. This method uses the Spearman filter to eliminate features from the dataset that are highly correlated with the outcome columns, based on a pre-set threshold """ self._log("Applying filter by spearman test - drop columns correlated with outcome") self._apply_filter(SpearmanFilter, self.outcomes[0], self.treatment, self.same_target_threshold) def outliers_filter(self): """Removes outlier values from the dataset. This method employs an OutliersFilter. If `drop_outliers_by_percentile` is True, it retains only the values between the min and max percentiles If `drop_outliers_by_percentile` is False, it retains only the values between 2nd and 98th percentiles """ self._log( f"Applying filter of outliers\n" f"interquartile_coeff={self.interquartile_coeff}\n" f"mode_percentile={self.mode_percentile}\n" f"min_percentile={self.min_percentile}\n" f"max_percentile={self.max_percentile}" ) self._apply_filter( OutliersFilter, self.interquartile_coeff, self.mode_percentile, self.min_percentile, self.max_percentile ) def match_no_rep(self, threshold: float = 0.1, approximate_match: bool = False) -> pd.DataFrame: """Matching groups with no replacement. 
It's done by optimizing the linear sum of distances between pairs of treatment and control samples. Args: threshold: caliper for minimum deviation between test and control groups. in case weights is not None. approximate_match: use or not approximate matching Returns: Matched dataframe with no replacements. """ a = self.input_data[self.treatment] X = self.input_data.drop(columns=self.treatment) if self.info_col is not None: X = X.drop(columns=self.info_col) index_matched = MatcherNoReplacement(X, a, self.weights, approximate_match).match() filtred_matches = index_matched.loc[1].iloc[self.input_data[a == 1].index].matches[index_matched.loc[1].iloc[self.input_data[a == 1].index].matches.apply(lambda x: x != [])] if self.weights is not None: weighted_features = [f for f in self.weights.keys()] index_dict = dict() for w in weighted_features: source = self.input_data.loc[np.concatenate(filtred_matches.values)][w].values target = self.input_data.loc[filtred_matches.index.to_list()][w].values index = abs(source - target) <= abs(source) * threshold index_dict.update({w: index}) index_filtered = sum(index_dict.values()) == len(self.weights) matched_data = pd.concat( [self.input_data.loc[filtred_matches.index.to_list()].iloc[index_filtered], self.input_data.loc[np.concatenate(filtred_matches.values)].iloc[index_filtered]] ) else: matched_data = pd.concat([self.input_data.loc[filtred_matches.index.to_list()], self.input_data.loc[np.concatenate(filtred_matches.values)]]) return matched_data def lama_feature_select(self) -> pd.DataFrame: """Calculates the importance of each feature. This method use LamaFeatureSelector to rank the importance of each feature in the dataset The features are then sorted by their importance with the most important feature first Returns: The feature importances, sorted in descending order """ self._log("Counting feature importance") feat_select = FeatureSelector( outcome=self.outcomes[0], outcome_type=self.outcome_type, treatment=self.treatment, timeout=self.timeout, n_threads=self.n_threads, n_folds=self.n_folds, verbose=self.verbose, generate_report=self.generate_report, report_dir=self.report_feat_select_dir, use_algos=self.use_algos, ) df = self.input_data if self.group_col is None else self.input_data.drop(columns=self.group_col) if self.info_col is not None: df = df.drop(columns=self.info_col) features = feat_select.perform_selection(df=df) if self.group_col is None: self.features_importance = features else: self.features_importance = features.append( {"Feature": self.group_col, "Importance": features.Importance.max()}, ignore_index=True ) return self.features_importance.sort_values("Importance", ascending=False) def _create_faiss_matcher(self, df=None, validation=None): """Creates a FaissMatcher object. Args: df: The dataframe to use. If None, uses self.input_data. validation: Whether to use the matcher for validation. 
If None, determines based on whether """ if df is None: df = self.input_data self.matcher = FaissMatcher( df, self.outcomes, self.treatment, info_col=self.info_col, weights=self.weights, features=self.features_importance, group_col=self.group_col, validation=validation, n_neighbors=self.n_neighbors, pbar=False if validation else self.pbar, ) def _perform_validation(self): """Performs validation using the FaissMatcher.""" if self.group_col is None: sim = self.matcher.match() else: sim = self.matcher.group_match() for key in self.val_dict.keys(): self.val_dict[key].append(sim[key][0]) def _log(self, message, silent=None): """Logs a message at the appropriate level. Args: message: The message to log. silent: If silent, logs will be only info """ if silent is None: silent = self.silent if silent: logger.debug(message) else: logger.info(message) def _matching(self) -> tuple: """Performs matching considering the presence of groups. Returns: Results of matching and matching quality metrics """ self._create_faiss_matcher() self._log("Applying matching") self.results, df_matched = self.matcher.match() self.quality_result = self.matcher.matching_quality(df_matched) return self.results, self.quality_result, df_matched def validate_result( self, refuter: str = "random_feature", effect_type: str = "ate", n_sim: int = 10, fraction: float = 0.8 ) -> dict: """Validates estimated ATE (Average Treatment Effect). Validates estimated effect: 1) by replacing real treatment with random placebo treatment. Estimated effect must be droped to zero, p-val > 0.05; 2) by adding random feature (`random_feature`). Estimated effect shouldn't change significantly, p-val < 0.05; 3) estimates effect on subset of data (default fraction is 0.8). Estimated effect shouldn't change significantly, p-val < 0.05. Args: refuter: Refuter type (`random_treatment`, `random_feature`, `subset_refuter`) effect_type: Which effect to validate (`ate`, `att`, `atc`) n_sim: Number of simulations fraction: Subset fraction for subset refuter only Returns: Dictionary of outcome_name (mean_effect on validation, p-value) """ if self.silent: logger.debug("Applying validation of result") else: logger.info("Applying validation of result") self.val_dict = {k: [] for k in self.outcomes} self.pval_dict = dict() effect_dict = {"ate": 0, "atc": 1, "att": 2} assert effect_type in effect_dict.keys() for i in tqdm(range(n_sim)): if refuter in ["random_treatment", "random_feature"]: if refuter == "random_treatment": self.input_data, orig_treatment, self.validate = random_treatment(self.input_data, self.treatment) elif refuter == "random_feature":
self.input_data, self.validate = random_feature(self.input_data)
7
2023-11-01 08:58:57+00:00
16k
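Illustrative aside (not part of the dataset row above): a minimal usage sketch mirroring the pipeline in the Matcher class docstring from this row's code. The dataframe and the column names ("treatment", "target", "user_id") are hypothetical placeholders, and estimate() is called as in that docstring example even though its definition is not included in the excerpt shown here.

import pandas as pd
from hypex.matcher import Matcher    # module path taken from the row's file_path

df = pd.read_csv("experiment.csv")                       # hypothetical input data
model = Matcher(df, treatment="treatment", outcome="target", info_col=["user_id"])
features = model.lama_feature_select()                   # feature importance ranking
results, quality, df_matched = model.estimate(features=features)  # matching plus ATE/ATT/ATC metrics
model.validate_result(refuter="random_feature")          # placebo-style validation of the estimated effect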
tianhaowuhz/human-assisting-dex-grasp
Runners/EvalGFPPO.py
[ { "identifier": "GFPPO", "path": "Algorithms/ppo/gf_ppo_update.py", "snippet": "class GFPPO:\n def __init__(self,\n vec_env,\n cfg_train,\n device='cpu',\n sampler='sequential',\n log_dir='run',\n is_testing=False,\n print_log=True,\n apply_reset=False,\n asymmetric=False,\n args=None,\n ):\n self.args = args\n ''' PPO '''\n # PPO parameters\n if not isinstance(vec_env.observation_space, Space):\n raise TypeError(\"vec_env.observation_space must be a gym Space\")\n if not isinstance(vec_env.state_space, Space):\n raise TypeError(\"vec_env.state_space must be a gym Space\")\n if not isinstance(vec_env.action_space, Space):\n raise TypeError(\"vec_env.action_space must be a gym Space\")\n self.observation_space = vec_env.observation_space\n self.action_space = vec_env.action_space\n self.state_space = vec_env.state_space\n self.cfg_train = copy.deepcopy(cfg_train)\n learn_cfg = self.cfg_train[\"learn\"]\n self.device = device\n self.asymmetric = asymmetric\n self.desired_kl = learn_cfg.get(\"desired_kl\", None)\n self.schedule = learn_cfg.get(\"schedule\", \"fixed\")\n self.step_size = learn_cfg[\"optim_stepsize\"]\n self.init_noise_std = learn_cfg.get(\"init_noise_std\", 0.3)\n self.model_cfg = self.cfg_train[\"policy\"]\n self.num_transitions_per_env=learn_cfg[\"nsteps\"]\n self.learning_rate=learn_cfg[\"optim_stepsize\"]\n\n self.clip_param = learn_cfg[\"cliprange\"]\n self.num_learning_epochs = learn_cfg[\"noptepochs\"]\n self.num_mini_batches = learn_cfg[\"nminibatches\"]\n self.value_loss_coef = learn_cfg.get(\"value_loss_coef\", 2.0)\n self.entropy_coef = learn_cfg[\"ent_coef\"]\n self.gamma = learn_cfg[\"gamma\"]\n self.lam = learn_cfg[\"lam\"]\n self.max_grad_norm = learn_cfg.get(\"max_grad_norm\", 2.0)\n self.use_clipped_value_loss = learn_cfg.get(\"use_clipped_value_loss\", False)\n\n # policy type \n self.action_type = self.cfg_train[\"setting\"][\"action_type\"]\n self.sub_action_type = self.cfg_train[\"setting\"][\"sub_action_type\"]\n self.action_clip = self.cfg_train[\"setting\"][\"action_clip\"]\n self.grad_process = self.cfg_train[\"setting\"][\"grad_process\"]\n self.grad_scale = self.cfg_train[\"setting\"][\"grad_scale\"]\n\n if self.action_type=='joint' and self.sub_action_type=='add+jointscale':\n action_space_shape = (18+18,)\n else:\n action_space_shape = self.action_space.shape\n print(f'action_space_shape:{action_space_shape}!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n self.vec_env = vec_env\n self.vec_env.grad_scale = self.grad_scale\n \n pointnet_version = self.cfg_train[\"policy\"][\"pointnet_version\"]\n\n hand_pcl = self.cfg_train[\"policy\"][\"hand_pcl\"]\n hand_model = None\n\n # PPO components\n self.stack_frame_numer = self.vec_env.stack_frame_numbers\n self.actor_critic = ActorCritic(self.observation_space.shape, self.state_space.shape, action_space_shape,\n self.init_noise_std, self.model_cfg, asymmetric=asymmetric, stack_frame_number=self.stack_frame_numer, \n sub_obs_type=self.vec_env.sub_obs_type, num_fingertip=self.vec_env.num_fingertips, pointnet_type=pointnet_version, \n envs=self.vec_env, hand_pcl=hand_pcl, hand_model=hand_model, args=args)\n\n # pointnet backbone\n \n self.pointnet_finetune = self.model_cfg['finetune_pointnet']\n self.finetune_pointnet_bz = 128\n if self.model_cfg['pretrain_pointnet']:\n if pointnet_version == 'pt2':\n pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt2.pt'), map_location=self.device)\n elif pointnet_version == 'pt':\n pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt.pt'), 
map_location=self.device)\n if self.model_cfg['shared_pointnet']:\n self.actor_critic.pointnet_enc.load_state_dict(pointnet_model_dict)\n if not self.model_cfg['finetune_pointnet']:\n # freeze pointnet\n for name,param in self.actor_critic.pointnet_enc.named_parameters():\n param.requires_grad = False\n else:\n self.actor_critic.actor_pointnet_enc.load_state_dict(pointnet_model_dict)\n self.actor_critic.critic_pointnet_enc.load_state_dict(pointnet_model_dict)\n\n if not self.model_cfg['finetune_pointnet']:\n # freeze pointnet\n for name,param in self.actor_critic.actor_pointnet_enc.named_parameters():\n param.requires_grad = False\n for name,param in self.actor_critic.critic_pointnet_enc.named_parameters():\n param.requires_grad = False\n\n self.actor_critic.to(self.device)\n self.storage = RolloutStorage(self.vec_env.num_envs, self.num_transitions_per_env, self.observation_space.shape,\n self.state_space.shape, action_space_shape, self.device, sampler)\n \n self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.actor_critic.parameters()), lr=self.learning_rate)\n\n ''' SDE '''\n if 'gf' in self.vec_env.sub_obs_type:\n # init SDE config\n self.prior_fn, self.marginal_prob_fn, self.sde_fn = init_sde(\"vp\")\n self.score = CondScoreModel(\n self.marginal_prob_fn,\n hidden_dim=args.hidden_dim,\n embed_dim=args.embed_dim,\n mode=args.score_mode,\n relative=args.relative,\n space=args.space,\n pointnet_version='pt2',\n )\n model_dict = torch.load(os.path.join(args.score_model_path,'score.pt'))\n self.score.load_state_dict(model_dict)\n self.score.to(device)\n self.score.eval()\n self.points_per_object = args.points_per_object\n self.t0 = args.t0\n self.ori_grad = None\n\n ''' Log '''\n # self.log_dir = log_dir\n if self.args.model_dir != \"\" and self.vec_env.mode=='train':\n time_now = self.args.model_dir.split('/')[8].split('_')[0] \n else:\n time_now = time.strftime('%m-%d-%H-%M',time.localtime(time.time()))\n\n self.log_dir = os.path.join(f\"./logs/{args.exp_name}/{time_now}_handrot:{self.vec_env.hand_rotation}_t0:{self.t0}_sfn:{self.vec_env.stack_frame_numbers}_{self.vec_env.num_envs}ne_{len(self.vec_env.shapes_all)}obj_gpt:{self.grad_process}_gs:{self.grad_scale}_at:{self.action_type}_subat:{self.sub_action_type}_rt:{self.vec_env.reward_type}_rn:{self.vec_env.reward_normalize}_simfreq:{self.vec_env.similarity_reward_freq}_cd:{self.vec_env.close_dis}_pts:{pointnet_version}_seed{args.seed}\")\n self.print_log = print_log\n self.writer = SummaryWriter(log_dir=self.log_dir, flush_secs=10)\n self.tot_timesteps = 0\n self.tot_time = 0\n self.is_testing = is_testing\n self.current_learning_iteration = 0\n\n if save_video:\n self.video_log_dir = os.path.join(self.log_dir,'video')\n os.makedirs(self.video_log_dir,exist_ok=True)\n self.vis_env_num = self.args.vis_env_num\n\n self.apply_reset = apply_reset\n\n ''' Evaluation '''\n if 'gf_check' in self.action_type:\n self.eval_round = 20\n else:\n self.eval_round = 5\n\n if self.vec_env.mode == 'eval':\n self.eval_round = self.args.eval_times\n\n if save_state:\n self.eval_metrics = {\n 'obj_shapes':[],\n 'time_step':[],\n 'success_rate':[],\n 'gt_dist':[],\n 'stability':[],\n 'lift_nums':np.zeros(self.vec_env.num_envs),\n 'gf_state_init':[],\n 'gf_state_final':[],\n 'gf_state_gt':[],\n }\n else:\n self.eval_metrics = {\n 'obj_shapes':[],\n 'time_step':[],\n 'success_rate':[],\n 'gt_dist':[],\n 'stability':[],\n 'lift_nums':np.zeros(self.vec_env.num_envs),\n 'obj_translation':[],\n 'obj_cosine_similarity':[],\n }\n 
self.eval_metrics['obj_shapes'] = self.vec_env.object_types\n\n def test(self, path):\n self.actor_critic.load_state_dict(torch.load(path, map_location=self.device))\n self.actor_critic.eval()\n\n def load(self, path):\n self.actor_critic.load_state_dict(torch.load(path, map_location=self.device))\n self.current_learning_iteration = int(path.split(\"_\")[-1].split(\".\")[0])\n self.actor_critic.train()\n\n model_dir = path[:-len(path.split('/')[-1])] + f\"metric_{self.args.exp_name}_{self.args.seed}.pkl\"\n self.eval_metrics = CPickle.load(open(model_dir, 'rb'))\n\n def save(self, path):\n torch.save(self.actor_critic.state_dict(), path)\n \n def eval(self, it):\n # eval initilization\n self.vec_env.eval(vis=save_video)\n test_times = 0\n success_times = 0 # total_success_times / total_trials\n success_rates = [] # s_rate for each round\n reward_all = []\n if 'gf_check' in self.action_type:\n total_diff_direction_num = 0\n total_dof_error = 0\n diff_joint_num = torch.zeros(18,device=self.device)\n \n if self.vec_env.mode == 'train':\n save_time = 0 # means save all videos\n else:\n save_time = self.eval_round - 1\n\n # start evaluation\n with tqdm(total=self.eval_round) as pbar:\n pbar.set_description('Validating:')\n with torch.no_grad():\n for r in range(self.eval_round) :\n if save_video and r<=save_time:\n all_images = torch.tensor([],device=self.device)\n # reset env\n current_obs = self.vec_env.reset()['obs']\n current_states = self.vec_env.get_state()\n eval_done_envs = torch.zeros(self.vec_env.num_envs, dtype=torch.long, device=self.device)\n\n if save_state:\n self.eval_metrics['gf_state_init'].append(self.vec_env.get_states(gf_state=True))\n self.eval_metrics['gf_state_gt'].append(self.vec_env.target_hand_dof)\n\n # step\n while True :\n # Compute the action\n actions, grad = self.compute_action(current_obs=current_obs,mode='eval')\n # print(grad)\n step_actions = self.process_actions(actions=actions.clone(), grad=grad)\n # primitive_actions.append(torch.mean(grad).item())\n # all_actions.append(torch.mean(step_actions).item())\n if self.vec_env.progress_buf[0] == 49 and save_state:\n self.eval_metrics['gf_state_final'].append(self.vec_env.get_states(gf_state=True))\n\n # Step the vec_environment\n next_obs, rews, dones, infos = self.vec_env.step(step_actions, (actions,grad))\n\n if save_video and r<=save_time:\n image = self.vec_env.render(rgb=True, img_size=img_size, vis_env_num=self.vis_env_num).reshape(self.vis_env_num, 1, img_size, img_size, 3)\n all_images = torch.cat([all_images, image],1)\n current_obs.copy_(next_obs['obs'])\n\n # done\n new_done_env_ids = (dones&(1-eval_done_envs)).nonzero(as_tuple=False).squeeze(-1)\n if len(new_done_env_ids) > 0:\n if self.vec_env.disable_collision:\n print('-----------------------------------')\n print('no coll succ:', infos['success_num'])\n self.vec_env.grasp_filter(states=self.eval_metrics['gf_state_final'][r], test_time=1, reset_coll=True)\n \n self.eval_metrics['time_step'].append(it)\n self.eval_metrics['success_rate'].append(float(infos['success_rate'].cpu().numpy()))\n # self.eval_metrics['obj_translation'].append(float(infos['obj_translation'].cpu().numpy()))\n # self.eval_metrics['obj_cosine_similarity'].append(float(infos['obj_cosine_similarity'].cpu().numpy()))\n self.eval_metrics['gt_dist'].append(float(infos['gt_dist'].cpu().numpy()))\n self.eval_metrics['lift_nums']+=infos['lift_nums'].cpu().numpy()\n if self.vec_env.mode == 'eval':\n with 
open(f'logs/{self.args.exp_name}/metrics_{self.args.eval_name}_eval_{self.args.seed}.pkl', 'wb') as f: \n pickle.dump(self.eval_metrics, f)\n else:\n with open(os.path.join(self.log_dir, f'metric_{self.args.exp_name}_{self.args.seed}.pkl'), 'wb') as f: \n pickle.dump(self.eval_metrics, f)\n\n if 'gf_check' in self.action_type:\n final_hand_dof = self.vec_env.final_hand_dof\n target_hand_dof = self.vec_env.target_hand_dof\n diff_direction_ids = ((self.vec_env.final_hand_dof * self.vec_env.target_hand_dof)<0).nonzero() \n same_direction_ids = ((self.vec_env.final_hand_dof * self.vec_env.target_hand_dof)>0).nonzero() \n for mm in range(18):\n diff_joint_num[mm] += torch.sum(diff_direction_ids[:,1]==mm) \n print(len(diff_direction_ids)/self.vec_env.num_envs)\n print(diff_joint_num)\n dof_error = torch.mean(abs(target_hand_dof[same_direction_ids[:,0],same_direction_ids[:,1]] - final_hand_dof[same_direction_ids[:,0],same_direction_ids[:,1]]))\n print(dof_error)\n total_diff_direction_num+=(len(diff_direction_ids)/self.vec_env.num_envs)\n total_dof_error+=(dof_error)\n\n if r > save_time:\n self.vec_env.graphics_device_id = -1\n self.vec_env.enable_camera_sensors = False\n\n if save_video and r<=save_time:\n for (i,images) in enumerate(all_images):\n obj_type = self.vec_env.object_type_per_env[i]\n save_path = os.path.join(self.video_log_dir,f'{obj_type}_epoach:{it}_round:{r}')\n images_to_video(path=save_path, images=images.cpu().numpy(), size=(img_size,img_size))\n\n test_times += len(new_done_env_ids)\n success_times += infos['success_num']\n reward_all.extend(rews[new_done_env_ids].cpu().numpy())\n eval_done_envs[new_done_env_ids] = 1\n print(f'eval_success_rate: {success_times/test_times}')\n success_rates.append(infos['success_num'] / len(new_done_env_ids))\n\n if test_times==(r+1)*self.vec_env.num_envs:\n break\n pbar.update(1)\n if 'gf_check' in self.action_type:\n print(f'total_diff_direction_num:{total_diff_direction_num/self.eval_round}')\n print(f'total_dof_error:{total_dof_error/self.eval_round}')\n\n assert test_times==self.eval_round*self.vec_env.num_envs\n success_rates = torch.tensor(success_rates)\n sr_mu, sr_std = success_rates.mean().cpu().numpy().item(), success_rates.std().cpu().numpy().item()\n print(f'====== t0: {self.t0} || num_envs: {self.vec_env.num_envs} || eval_times: {self.eval_round}')\n print(f'eval_success_rate: {sr_mu:.2f} +- {sr_std:.2f}')\n eval_rews = np.mean(reward_all)\n print(f'eval_rewards: {eval_rews}')\n self.writer.add_scalar('Eval/success_rate', sr_mu, it)\n self.writer.add_scalar('Eval/eval_rews', eval_rews, it)\n\n def run(self, num_learning_iterations, log_interval=1):\n if self.is_testing:\n self.eval(0)\n else:\n # train initilization\n self.actor_critic.train()\n self.vec_env.train()\n rewbuffer = deque(maxlen=100)\n lenbuffer = deque(maxlen=100)\n cur_reward_sum = torch.zeros(self.vec_env.num_envs, dtype=torch.float, device=self.device)\n cur_episode_length = torch.zeros(self.vec_env.num_envs, dtype=torch.float, device=self.device)\n reward_sum = []\n episode_length = []\n\n # reset env\n current_obs = self.vec_env.reset()['obs']\n current_states = self.vec_env.get_state()\n for it in range(self.current_learning_iteration, num_learning_iterations):\n start = time.time()\n ep_infos = []\n if 'ori_similarity' in self.vec_env.reward_type:\n ori_sim_all = []\n # Rollout\n for _ in range(self.num_transitions_per_env):\n if self.apply_reset:\n current_obs = self.vec_env.reset()['obs']\n current_states = self.vec_env.get_state()\n\n # Compute the 
action\n actions, actions_log_prob, values, mu, sigma, grad = self.compute_action(current_obs=current_obs, current_states=current_states)\n step_actions = self.process_actions(actions=actions.clone(), grad=grad)\n\n # Step the vec_environment\n next_obs, rews, dones, infos = self.vec_env.step(step_actions, (actions,grad))\n\n next_states = self.vec_env.get_state()\n\n # Record the transition\n self.storage.add_transitions(current_obs, current_states, actions, rews, dones, values, actions_log_prob, mu, sigma)\n current_obs.copy_(next_obs['obs'])\n current_states.copy_(next_states)\n\n # Book keeping\n ep_infos.append(infos.copy())\n # set_trace()\n if 'ori_similarity' in self.vec_env.reward_type:\n ori_sim_all.append(torch.mean(infos['ori_similarity']))\n # self.writer.add_scalar('Episode/ori_sim_all', torch.mean(infos['ori_similarity']), _)\n\n if self.print_log:\n cur_reward_sum[:] += rews\n cur_episode_length[:] += 1\n\n new_ids = (dones > 0).nonzero(as_tuple=False)\n reward_sum.extend(cur_reward_sum[new_ids][:, 0].cpu().numpy().tolist())\n episode_length.extend(cur_episode_length[new_ids][:, 0].cpu().numpy().tolist())\n cur_reward_sum[new_ids] = 0\n cur_episode_length[new_ids] = 0\n \n # done\n if torch.sum(dones) > 0:\n current_obs = self.vec_env.reset(dones)['obs']\n current_states = self.vec_env.get_state()\n print(infos['success_rate'])\n if 'ori_similarity' in self.vec_env.reward_type:\n fig = plt.figure()\n plt.plot(torch.tensor(ori_sim_all).cpu().numpy())\n ori_sim_all_img = get_img_from_fig(fig, dpi=100)\n # ori_sim_all_img = cv2.resize(ori_sim_all_img,(256,256))\n self.writer.add_image(\"ori_sim\", ori_sim_all_img, it, dataformats='HWC')\n\n if self.print_log:\n # reward_sum = [x[0] for x in reward_sum]\n # episode_length = [x[0] for x in episode_length]\n rewbuffer.extend(reward_sum)\n lenbuffer.extend(episode_length)\n\n _, _, last_values, _, _, _ = self.compute_action(current_obs=current_obs, current_states=current_states, mode='train')\n stop = time.time()\n collection_time = stop - start\n mean_trajectory_length, mean_reward = self.storage.get_statistics()\n\n # Learning step\n start = stop\n self.storage.compute_returns(last_values, self.gamma, self.lam)\n mean_value_loss, mean_surrogate_loss = self.update()\n self.storage.clear()\n stop = time.time()\n learn_time = stop - start\n if self.print_log:\n self.log(locals())\n if it % log_interval == 0:\n self.actor_critic.eval()\n self.eval(it)\n self.actor_critic.train()\n self.vec_env.train()\n self.save(os.path.join(self.log_dir, 'model_{}.pt'.format(it)))\n\n current_obs = self.vec_env.reset()['obs']\n current_states = self.vec_env.get_state()\n cur_episode_length[:] = 0\n # TODO clean extras\n ep_infos.clear()\n self.save(os.path.join(self.log_dir, 'model_{}.pt'.format(num_learning_iterations)))\n\n def log(self, locs, width=70, pad=35):\n self.tot_timesteps += self.num_transitions_per_env * self.vec_env.num_envs\n self.tot_time += locs['collection_time'] + locs['learn_time']\n iteration_time = locs['collection_time'] + locs['learn_time']\n\n ep_string = f''\n if locs['ep_infos']:\n for key in locs['ep_infos'][0]:\n infotensor = torch.tensor([], device=self.device)\n for ep_info in locs['ep_infos']:\n infotensor = torch.cat((infotensor, ep_info[key].to(self.device)))\n if key=='success_num':\n value = torch.sum(infotensor)\n self.writer.add_scalar('Episode/' + 'total_success_num', value, locs['it'])\n ep_string += f\"\"\"{f'Total episode {key}:':>{pad}} {value:.4f}\\n\"\"\"\n value = torch.mean(infotensor)\n 
self.writer.add_scalar('Episode/' + key, value, locs['it'])\n ep_string += f\"\"\"{f'Mean episode {key}:':>{pad}} {value:.4f}\\n\"\"\"\n mean_std = self.actor_critic.log_std.exp().mean()\n\n self.writer.add_scalar('Loss/value_function', locs['mean_value_loss'], locs['it'])\n self.writer.add_scalar('Loss/surrogate', locs['mean_surrogate_loss'], locs['it'])\n self.writer.add_scalar('Policy/mean_noise_std', mean_std.item(), locs['it'])\n if len(locs['rewbuffer']) > 0:\n self.writer.add_scalar('Train/mean_reward', statistics.mean(locs['rewbuffer']), locs['it'])\n self.writer.add_scalar('Train/mean_episode_length', statistics.mean(locs['lenbuffer']), locs['it'])\n self.writer.add_scalar('Train/mean_reward/time', statistics.mean(locs['rewbuffer']), self.tot_time)\n self.writer.add_scalar('Train/mean_episode_length/time', statistics.mean(locs['lenbuffer']), self.tot_time)\n\n self.writer.add_scalar('Train2/mean_reward/step', locs['mean_reward'], locs['it'])\n self.writer.add_scalar('Train2/mean_episode_length/episode', locs['mean_trajectory_length'], locs['it'])\n\n fps = int(self.num_transitions_per_env * self.vec_env.num_envs / (locs['collection_time'] + locs['learn_time']))\n\n str = f\" \\033[1m Learning iteration {locs['it']}/{locs['num_learning_iterations']} \\033[0m \"\n\n if len(locs['rewbuffer']) > 0:\n log_string = (f\"\"\"{'#' * width}\\n\"\"\"\n f\"\"\"{str.center(width, ' ')}\\n\\n\"\"\"\n f\"\"\"{'Computation:':>{pad}} {fps:.0f} steps/s (collection: {locs[\n 'collection_time']:.3f}s, learning {locs['learn_time']:.3f}s)\\n\"\"\"\n f\"\"\"{'Value function loss:':>{pad}} {locs['mean_value_loss']:.4f}\\n\"\"\"\n f\"\"\"{'Surrogate loss:':>{pad}} {locs['mean_surrogate_loss']:.4f}\\n\"\"\"\n f\"\"\"{'Mean action noise std:':>{pad}} {mean_std.item():.2f}\\n\"\"\"\n f\"\"\"{'Mean reward:':>{pad}} {statistics.mean(locs['rewbuffer']):.2f}\\n\"\"\"\n f\"\"\"{'Mean episode length:':>{pad}} {statistics.mean(locs['lenbuffer']):.2f}\\n\"\"\"\n f\"\"\"{'Mean reward/step:':>{pad}} {locs['mean_reward']:.2f}\\n\"\"\"\n f\"\"\"{'Mean episode length/episode:':>{pad}} {locs['mean_trajectory_length']:.2f}\\n\"\"\")\n else:\n log_string = (f\"\"\"{'#' * width}\\n\"\"\"\n f\"\"\"{str.center(width, ' ')}\\n\\n\"\"\"\n f\"\"\"{'Computation:':>{pad}} {fps:.0f} steps/s (collection: {locs[\n 'collection_time']:.3f}s, learning {locs['learn_time']:.3f}s)\\n\"\"\"\n f\"\"\"{'Value function loss:':>{pad}} {locs['mean_value_loss']:.4f}\\n\"\"\"\n f\"\"\"{'Surrogate loss:':>{pad}} {locs['mean_surrogate_loss']:.4f}\\n\"\"\"\n f\"\"\"{'Mean action noise std:':>{pad}} {mean_std.item():.2f}\\n\"\"\"\n f\"\"\"{'Mean reward/step:':>{pad}} {locs['mean_reward']:.2f}\\n\"\"\"\n f\"\"\"{'Mean episode length/episode:':>{pad}} {locs['mean_trajectory_length']:.2f}\\n\"\"\")\n\n log_string += ep_string\n log_string += (f\"\"\"{'-' * width}\\n\"\"\"\n f\"\"\"{'Total timesteps:':>{pad}} {self.tot_timesteps}\\n\"\"\"\n f\"\"\"{'Iteration time:':>{pad}} {iteration_time:.2f}s\\n\"\"\"\n f\"\"\"{'Total time:':>{pad}} {self.tot_time:.2f}s\\n\"\"\"\n f\"\"\"{'ETA:':>{pad}} {self.tot_time / (locs['it'] + 1) * (\n locs['num_learning_iterations'] - locs['it']):.1f}s\\n\"\"\")\n print(log_string)\n\n def update(self):\n mean_value_loss = 0\n mean_surrogate_loss = 0\n\n batch = self.storage.mini_batch_generator(self.num_mini_batches)\n\n for epoch in range(self.num_learning_epochs):\n # for obs_batch, actions_batch, target_values_batch, advantages_batch, returns_batch, old_actions_log_prob_batch \\\n # in 
self.storage.mini_batch_generator(self.num_mini_batches):\n\n for indices in batch:\n # print(len(indices))\n\n obs_batch = self.storage.observations.view(-1, *self.storage.observations.size()[2:])[indices]\n if self.asymmetric:\n states_batch = self.storage.states.view(-1, *self.storage.states.size()[2:])[indices]\n else:\n states_batch = None\n actions_batch = self.storage.actions.view(-1, self.storage.actions.size(-1))[indices]\n target_values_batch = self.storage.values.view(-1, 1)[indices]\n returns_batch = self.storage.returns.view(-1, 1)[indices]\n old_actions_log_prob_batch = self.storage.actions_log_prob.view(-1, 1)[indices]\n advantages_batch = self.storage.advantages.view(-1, 1)[indices]\n old_mu_batch = self.storage.mu.view(-1, self.storage.actions.size(-1))[indices]\n old_sigma_batch = self.storage.sigma.view(-1, self.storage.actions.size(-1))[indices]\n\n actions_log_prob_batch, entropy_batch, value_batch, mu_batch, sigma_batch = self.actor_critic.evaluate(obs_batch,\n states_batch,\n actions_batch)\n\n # KL\n if self.desired_kl != None and self.schedule == 'adaptive':\n\n kl = torch.sum(\n sigma_batch - old_sigma_batch + (torch.square(old_sigma_batch.exp()) + torch.square(old_mu_batch - mu_batch)) / (2.0 * torch.square(sigma_batch.exp())) - 0.5, axis=-1)\n kl_mean = torch.mean(kl)\n\n if kl_mean > self.desired_kl * 2.0:\n self.step_size = max(1e-5, self.step_size / 1.5)\n elif kl_mean < self.desired_kl / 2.0 and kl_mean > 0.0:\n self.step_size = min(1e-2, self.step_size * 1.5)\n\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.step_size\n\n # Surrogate loss\n ratio = torch.exp(actions_log_prob_batch - torch.squeeze(old_actions_log_prob_batch))\n surrogate = -torch.squeeze(advantages_batch) * ratio\n surrogate_clipped = -torch.squeeze(advantages_batch) * torch.clamp(ratio, 1.0 - self.clip_param,\n 1.0 + self.clip_param)\n surrogate_loss = torch.max(surrogate, surrogate_clipped).mean()\n\n # Value function loss\n if self.use_clipped_value_loss:\n value_clipped = target_values_batch + (value_batch - target_values_batch).clamp(-self.clip_param,\n self.clip_param)\n value_losses = (value_batch - returns_batch).pow(2)\n value_losses_clipped = (value_clipped - returns_batch).pow(2)\n value_loss = torch.max(value_losses, value_losses_clipped).mean()\n else:\n value_loss = (returns_batch - value_batch).pow(2).mean()\n\n loss = surrogate_loss + self.value_loss_coef * value_loss - self.entropy_coef * entropy_batch.mean()\n\n # Gradient step\n self.optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)\n self.optimizer.step()\n\n mean_value_loss += value_loss.item()\n mean_surrogate_loss += surrogate_loss.item()\n\n num_updates = self.num_learning_epochs * self.num_mini_batches\n mean_value_loss /= num_updates\n mean_surrogate_loss /= num_updates\n\n return mean_value_loss, mean_surrogate_loss\n\n '''\n utils\n '''\n def grad_norm(self,grad):\n scale_grad = (torch.max((abs(grad)),dim=1)[0]).reshape(-1,1).expand_as(grad)\n grad = grad/scale_grad\n return grad\n \n \n def action2grad(self, x, inv=False, relative=True, cur_x=None):\n if not inv:\n batch_size = x.size(0)\n state_dim = x.size(1)\n x = torch.cat([torch.sin(x).reshape(batch_size,state_dim,1), torch.cos(x).reshape(batch_size,state_dim,1)],2).reshape(batch_size,-1)\n return x\n else:\n batch_size = x.size(0)\n state_dim = x.size(1)\n x = x.reshape(batch_size,int(state_dim/2),2)\n cur_x = cur_x.reshape(batch_size,int(state_dim/2),2)\n\n 
cur_x = torch.cat([-cur_x[:,:,0:1], cur_x[:,:,1:2]],dim=-1)\n ori_grad = torch.sum(torch.cat([x[:,:,1:2], x[:,:,0:1]], dim=-1) * cur_x, dim=-1, keepdim=True).reshape(batch_size,int(state_dim/2))\n return ori_grad\n \n def get_obs_with_grad(self, current_obs, reset=False, t=None):\n # compute score\n B = current_obs.size(0)\n cur_hand_dof = current_obs[:,:18].clone() #【-1,1】\n pcl_index = self.stack_frame_numer*7 + 18\n cur_obj_pcl = current_obs[:,pcl_index:self.points_per_object*3+pcl_index].clone().reshape(-1, 3, self.points_per_object)\n\n if reset:\n with torch.no_grad(): \n in_process_sample, res = cond_ode_sampler(\n self.score,\n self.prior_fn,\n self.sde_fn,\n (cur_hand_dof, cur_obj_pcl),\n t0=0.5,\n device=self.device,\n num_steps=51,\n batch_size=B,\n space=self.args.space,\n )\n goal_pose = in_process_sample[-1,:,:]\n return goal_pose\n else:\n if self.args.space == 'riemann':\n if 'direct' in self.args.score_model_path:\n cur_hand_dof = self.vec_env.dof_norm(cur_hand_dof,inv=True)\n cur_hand_dof = self.action2grad(cur_hand_dof)\n\n if t is None:\n batch_time_step = torch.ones(B, device=self.device).unsqueeze(1) * self.t0\n else:\n t_max = 0.5\n t_min = 1e-5\n t = torch.tanh(t) * (t_max - t_min) / 2 + (t_max + t_min)/2\n batch_time_step = torch.clamp(t.reshape(B,-1), 1e-5, 0.5)\n self.vec_env.extras['t_value'] = torch.mean(abs(batch_time_step),-1)\n\n if self.args.space == 'riemann':\n grad = torch.zeros(B,36,device=self.device)\n elif self.args.space == 'euler':\n grad = torch.zeros(B,18,device=self.device)\n\n bz = 256\n iter_num = int(np.ceil(B/bz))\n\n for order in range(iter_num):\n with torch.no_grad(): \n if self.args.space == 'riemann':\n grad[order*bz:(order+1)*bz,:36] = self.score((cur_hand_dof[order*bz:(order+1)*bz,:], cur_obj_pcl[order*bz:(order+1)*bz,:]), batch_time_step[order*bz:(order+1)*bz,:]).detach()\n elif self.args.space == 'euler': \n grad[order*bz:(order+1)*bz,:18] = self.score((cur_hand_dof[order*bz:(order+1)*bz,:], cur_obj_pcl[order*bz:(order+1)*bz,:]), batch_time_step[order*bz:(order+1)*bz,:]).detach()\n\n if self.args.space == 'riemann':\n grad = self.action2grad(grad, inv=True, cur_x=cur_hand_dof)\n\n if 'pure_ori_similarity' in self.vec_env.reward_type:\n self.ori_grad = grad.clone()\n\n if 'direct' not in self.args.score_model_path:\n #denormalize to dof original range\n grad = grad * self.vec_env.shadow_hand_dof_range[self.vec_env.actuated_dof_indices] / 2\n\n if self.grad_process is not None:\n if 'norm' in self.grad_process:\n grad = self.grad_norm(grad)\n if 'clip' in self.grad_process:\n grad = torch.clamp(grad,-self.grad_scale,self.grad_scale)\n if 'scale' in self.grad_process:\n grad = grad * self.grad_scale\n\n if 'pure_ori_similarity' not in self.vec_env.reward_type:\n self.ori_grad = grad.clone()\n\n if self.action_type != 'controlt':\n current_obs[:,-18:] = grad\n\n # print(grad[0])\n return current_obs, grad\n \n def process_actions(self, actions, grad):\n if self.action_type=='joint':\n if self.sub_action_type=='add+jointscale':\n self.vec_env.extras['grad_ss_mean'] = torch.mean(abs(actions[:,:18]),-1)\n self.vec_env.extras['grad_ss_std'] = torch.std(abs(actions[:,:18]),-1)\n self.vec_env.extras['residual_mean'] = torch.mean(abs(actions[:,18:]),-1)\n self.vec_env.extras['residual_std'] = torch.std(abs(actions[:,18:]),-1)\n step_actions = grad*actions[:,:18] + actions[:,18:]\n else:\n step_actions = actions*grad\n elif self.action_type=='direct':\n step_actions = actions\n elif 'gf' in self.action_type:\n step_actions = grad\n return 
step_actions\n\n def compute_action(self, current_obs, current_states=None, mode='train'):\n # compute gf\n if 'gf' in self.vec_env.sub_obs_type:\n current_obs, grad = self.get_obs_with_grad(current_obs)\n else:\n grad = torch.zeros((current_obs.size(0),18), device=self.device)\n\n if self.pointnet_finetune:\n batch_num = current_obs.size(0)//self.finetune_pointnet_bz + 1\n for _ in range(batch_num):\n current_obs_batch = current_obs[self.finetune_pointnet_bz*_:self.finetune_pointnet_bz*(_+1),:]\n # current_states_batch = current_states[:,self.finetune_pointnet_bz*batch_num+self.finetune_pointnet_bz*(batch_num+1)]\n if mode=='train':\n actions_batch, actions_log_prob_batch, values_batch, mu_batch, sigma_batch = self.actor_critic.act(current_obs_batch, current_states)\n else:\n actions_batch = self.actor_critic.act_inference(current_obs_batch)\n if _ == 0:\n if mode=='train':\n actions, actions_log_prob, values, mu, sigma = actions_batch, actions_log_prob_batch, values_batch, mu_batch, sigma_batch\n else:\n actions = actions_batch\n else:\n if mode=='train':\n actions = torch.cat([actions, actions_batch])\n actions_log_prob = torch.cat([actions_log_prob,actions_log_prob_batch])\n values = torch.cat([values,values_batch])\n mu = torch.cat([mu, mu_batch])\n sigma = torch.cat([sigma, sigma_batch])\n else:\n actions = torch.cat([actions, actions_batch])\n else:\n if mode=='train':\n actions, actions_log_prob, values, mu, sigma = self.actor_critic.act(current_obs, current_states)\n else:\n actions = self.actor_critic.act_inference(current_obs)\n\n if mode=='train':\n return actions, actions_log_prob, values, mu, sigma, grad\n else:\n return actions, grad" }, { "identifier": "load_cfg", "path": "utils/config.py", "snippet": "def load_cfg(args):\n with open(os.path.join(os.path.dirname(__file__), '../ConDexEnv/condexenvs/cfg/train/', args.cfg_train+'.yaml'), 'r') as f:\n cfg_train = yaml.load(f, Loader=yaml.SafeLoader)\n\n logdir = args.logdir\n\n # Set deterministic mode\n if args.torch_deterministic:\n cfg_train[\"torch_deterministic\"] = True\n\n # Override seed if passed on the command line\n if args.seed is not None:\n cfg_train[\"seed\"] = args.seed\n\n log_id = args.logdir + \"_{}\".format(args.experiment)\n\n logdir = os.path.realpath(log_id)\n # os.makedirs(logdir, exist_ok=True)\n\n return cfg_train, logdir" }, { "identifier": "get_args", "path": "utils/config.py", "snippet": "def get_args(benchmark=False, use_rlg_config=False):\n custom_parameters = [\n \n # env \n {\"name\": \"--headless\", \"action\": \"store_true\", \"default\": False, \"help\": \"Force display off at all times\"},\n {\"name\": \"--rl_device\", \"type\": str, \"default\": \"cuda:1\", \"help\": \"Choose CPU or GPU device for inferencing policy network\"},\n {\"name\": \"--randomize\", \"action\": \"store_true\", \"default\": False, \"help\": \"Apply physics domain randomization\"},\n {\"name\": \"--num_envs\", \"type\": int, \"default\": 2, \"help\": \"Number of environments to create - override config file\"},\n {\"name\": \"--episode_length\", \"type\": int, \"default\": 0, \"help\": \"Episode length, by default is read from yaml config\"},\n {\"name\": \"--seed\", \"type\": int, \"help\": \"Random seed\"},\n {\"name\": \"--points_per_object\", \"type\": int, \"default\": 1024, \"help\": \"points for each object pcl\"},\n {\"name\": \"--method\", \"type\": str, \"default\": \"gf+rl\", \"help\": \"method\"},\n {\"name\": \"--run_device_id\", \"type\": int, \"help\": \"device id\"},\n {\"name\": \"--dataset_type\", 
\"type\": str, \"default\": \"train\", \"help\": \"method\"},\n # mode\n {\"name\": \"--mode\", \"type\": str, \"default\": \"train\", \"help\": \"env_mode\"},\n {\"name\": \"--test\", \"action\": \"store_true\", \"default\": False, \"help\": \"Run trained policy, no training\"},\n {\"name\": \"--eval_times\", \"type\": int, \"default\": 5, \"help\": \"eval times for each object\"},\n {\"name\": \"--constrained\", \"action\": \"store_true\", \"help\": \"whether constrain base\"},\n \n # score matching parameter\n {\"name\": \"--sigma\", \"type\": float, \"default\": 25, \"help\": \"eval times for each object\"},\n {\"name\": \"--t0\", \"type\": float, \"default\": 0.1, \"help\": \"t0 for sample\"},\n {\"name\": \"--hidden_dim\", \"type\": int, \"default\": 1024, \"help\": \"num of hidden dim\"},\n {\"name\": \"--embed_dim\", \"type\": int, \"default\": 512, \"help\": \"num of embed_dim\"},\n {\"name\": \"--score_mode\", \"type\": str, \"default\": \"target\", \"help\": \"score mode\"},\n {\"name\": \"--space\", \"type\": str, \"default\": \"riemann\", \"help\": \"angle space\"},\n {\"name\": \"--relative\", \"action\": \"store_false\", \"help\": \"relative pcl representation\"},\n {\"name\": \"--score_model_path\", \"type\": str, \"default\": \"./logs/train_all_rel_p2cuda_v_2e-4_2\", \"help\": \"pretrain score model path\"},\n # rl train \n {\"name\": \"--torch_deterministic\", \"action\": \"store_true\", \"default\": False, \"help\": \"Apply additional PyTorch settings for more deterministic behaviour\"},\n {\"name\": \"--metadata\", \"action\": \"store_true\", \"default\": False, \"help\": \"Requires --experiment flag, adds physics engine, sim device, pipeline info and if domain randomization is used to the experiment name provided by user\"},\n {\"name\": \"--resume\", \"type\": int, \"default\": 0, \"help\": \"Resume training or start testing from a checkpoint\"},\n {\"name\": \"--cfg_train\", \"type\": str, \"default\": \"ShadowHandConPPO\"},\n {\"name\": \"--max_iterations\", \"type\": int, \"default\": 0, \"help\": \"Set a maximum number of training iterations\"},\n {\"name\": \"--minibatch_size\", \"type\": int, \"default\": -1, \"help\": \"Set batch size for PPO optimization step. Supported only by rl_games. If not -1 overrides the config settings.\"},\n # log\n {\"name\": \"--logdir\", \"type\": str, \"default\": \"logs/gfppo/\"}, \n {\"name\": \"--experiment\", \"type\": str, \"default\": \"Base\", \"help\": \"Experiment name. 
If used with --metadata flag an additional information about physics engine, sim device, pipeline and domain randomization will be added to the name\"},\n {\"name\": \"--model_dir\", \"type\": str, \"default\": \"\", \"help\": \"Choose a model dir\"},\n {\"name\": \"--exp_name\", \"type\": str, \"default\": \"ours\", \"help\": \"exp_name\"},\n {\"name\": \"--eval_name\", \"type\": str, \"default\": \"ours\", \"help\": \"exp_name\"},\n {\"name\": \"--vis_env_num\", \"type\": int, \"default\": \"0\", \"help\": \"vis env num\"},\n ]\n \n\n # parse arguments\n args = gymutil.parse_arguments(\n description=\"RL Policy\",\n custom_parameters=custom_parameters)\n\n # allignment with examples\n args.device_id = args.compute_device_id\n args.device = args.sim_device_type if args.use_gpu_pipeline else 'cpu'\n\n if args.test:\n args.train = False\n else:\n args.train = True\n\n return args" }, { "identifier": "set_np_formatting", "path": "utils/config.py", "snippet": "def set_np_formatting():\n np.set_printoptions(edgeitems=30, infstr='inf',\n linewidth=4000, nanstr='nan', precision=2,\n suppress=False, threshold=10000, formatter=None)" } ]
import isaacgym
import condexenvs
import torch
import os
import sys
from Algorithms.ppo import GFPPO
from utils.config import load_cfg, get_args, set_np_formatting
10,971
sys.path.append(os.path.dirname(os.path.dirname(__file__))) if __name__ == '__main__': set_np_formatting() args = get_args() cfg_train, logdir = load_cfg(args) ''' change for different method ''' cfg_train['setting']['grad_scale'] = 1.0 cfg_train['policy']['pointnet_version'] = 'pt' if args.exp_name == 'ours': reward_type = "sr" sub_obs_type = "joint+fingertipjoint+wrist+objpcl+gf" cfg_train['setting']['action_type'] = "joint" cfg_train['setting']['sub_action_type'] = "add+jointscale" cfg_train['policy']['pretrain_pointnet'] = True cfg_train["learn"]["test"] = True ''' policy ''' cfg_train['policy']['hand_pcl'] = False envs = condexenvs.make( seed=args.seed, task="ShadowHandCon", num_envs=args.num_envs, sim_device=f"cuda:{args.run_device_id}", rl_device=f"cuda:{args.run_device_id}", graphics_device_id = args.run_device_id, headless=args.headless, mode = args.mode, eval_times=args.eval_times, method = args.method, constrained = args.constrained, reward_type = reward_type, sub_obs_type = sub_obs_type, dataset_type = args.dataset_type, ) envs.reset(env_init=True) learn_cfg = cfg_train["learn"] is_testing = learn_cfg["test"] # Override resume and testing flags if they are passed as parameters. chkpt_path = args.model_dir logdir = logdir + "_seed{}".format(args.seed)
sys.path.append(os.path.dirname(os.path.dirname(__file__))) if __name__ == '__main__': set_np_formatting() args = get_args() cfg_train, logdir = load_cfg(args) ''' change for different method ''' cfg_train['setting']['grad_scale'] = 1.0 cfg_train['policy']['pointnet_version'] = 'pt' if args.exp_name == 'ours': reward_type = "sr" sub_obs_type = "joint+fingertipjoint+wrist+objpcl+gf" cfg_train['setting']['action_type'] = "joint" cfg_train['setting']['sub_action_type'] = "add+jointscale" cfg_train['policy']['pretrain_pointnet'] = True cfg_train["learn"]["test"] = True ''' policy ''' cfg_train['policy']['hand_pcl'] = False envs = condexenvs.make( seed=args.seed, task="ShadowHandCon", num_envs=args.num_envs, sim_device=f"cuda:{args.run_device_id}", rl_device=f"cuda:{args.run_device_id}", graphics_device_id = args.run_device_id, headless=args.headless, mode = args.mode, eval_times=args.eval_times, method = args.method, constrained = args.constrained, reward_type = reward_type, sub_obs_type = sub_obs_type, dataset_type = args.dataset_type, ) envs.reset(env_init=True) learn_cfg = cfg_train["learn"] is_testing = learn_cfg["test"] # Override resume and testing flags if they are passed as parameters. chkpt_path = args.model_dir logdir = logdir + "_seed{}".format(args.seed)
runner = GFPPO(vec_env=envs,
0
2023-11-09 06:08:40+00:00
16k
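Read together, the row above encodes a single next-line completion example: the truncated source of Runners/EvalGFPPO.py ends at logdir = logdir + "_seed{}".format(args.seed), the expected continuation is runner = GFPPO(vec_env=envs,, and the gold snippet index 0 points at the retrieved GFPPO class definition that this continuation depends on. The sketch below is only an illustration of how such a row could be assembled into a prompt/target pair for evaluation; the function and variable names (build_example, exact_match_first_line, context_snippets) are placeholders and are not part of the data shown above.

# Illustrative sketch (not part of the dataset): assembling one row into a
# prompt/target pair for next-line completion evaluation.
def build_example(context_snippets, cropped_code, expected_next_line):
    # Retrieved cross-file snippets (e.g. the GFPPO class shown above) are
    # prepended to the truncated in-file code to form the model prompt.
    prompt = "\n\n".join(context_snippets) + "\n\n" + cropped_code + "\n"
    return prompt, expected_next_line

def exact_match_first_line(prediction, target):
    # Score only the first generated line, ignoring surrounding whitespace.
    lines = prediction.strip().splitlines()
    return bool(lines) and lines[0].strip() == target.strip()

prompt, target = build_example(
    context_snippets=["class GFPPO:\n    ..."],  # snippet at gold index 0
    cropped_code='logdir = logdir + "_seed{}".format(args.seed)',
    expected_next_line="runner = GFPPO(vec_env=envs,",
)
print(exact_match_first_line("runner = GFPPO(vec_env=envs,", target))  # True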
ml4bio/RhoFold
rhofold/model/structure_module.py
[ { "identifier": "Linear", "path": "rhofold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in the code.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n out_dim: int,\n bias: bool = True,\n ):\n \"\"\"\n Args:\n in_dim:\n The final dimension of inputs to the layer\n out_dim:\n The final dimension of layer outputs\n bias:\n Whether to learn an additive bias. True by default\n \"\"\"\n super(Linear, self).__init__(in_dim, out_dim, bias=bias)\n\n if bias:\n with torch.no_grad():\n self.bias.fill_(0)" }, { "identifier": "LayerNorm", "path": "rhofold/model/primitives.py", "snippet": "class LayerNorm(nn.Module):\n def __init__(self, c_in, eps=1e-5):\n super(LayerNorm, self).__init__()\n \n self.c_in = (c_in,)\n self.eps = eps\n\n self.weight = nn.Parameter(torch.ones(c_in))\n self.bias = nn.Parameter(torch.zeros(c_in))\n\n def forward(self, x): \n\n out = nn.functional.layer_norm(\n x,\n self.c_in,\n self.weight,\n self.bias,\n self.eps,\n )\n\n return out" }, { "identifier": "Rigid", "path": "rhofold/utils/rigid_utils.py", "snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. 
Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = 
self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> 
Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object 
from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())" }, { "identifier": "dict_multimap", "path": "rhofold/utils/tensor_utils.py", "snippet": "def dict_multimap(fn, dicts):\n first = dicts[0]\n new_dict = {}\n for k, v in first.items():\n all_v = [d[k] for d in dicts]\n if type(v) is dict:\n new_dict[k] = dict_multimap(fn, all_v)\n else:\n new_dict[k] = fn(all_v)\n\n return new_dict" }, { "identifier": "permute_final_dims", "path": "rhofold/utils/tensor_utils.py", "snippet": "def permute_final_dims(tensor: torch.Tensor, inds: List[int]):\n zero_index = -1 * len(inds)\n first_inds = list(range(len(tensor.shape[:zero_index])))\n return tensor.permute(first_inds + [zero_index + i for i in inds])" }, { "identifier": "flatten_final_dims", "path": "rhofold/utils/tensor_utils.py", "snippet": "def flatten_final_dims(t: torch.Tensor, no_dims: int):\n return t.reshape(t.shape[:-no_dims] + (-1,))" }, { "identifier": "RNAAlphabet", "path": "rhofold/utils/alphabet.py", "snippet": "class RNAAlphabet(Alphabet):\n\n def get_batch_converter(self):\n if self.use_msa:\n return RNAMSABatchConverter(self)\n else:\n return BatchConverter(self)\n\n @classmethod\n def from_architecture(cls, name: str, ) -> \"RNAAlphabet\":\n if name in (\"RNA MSA Transformer\", 
\"rna_msa_transformer\", \"RNA\"):\n standard_toks = rna_msaseq_toks[\"toks\"]\n prepend_toks = (\"<cls>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\",)\n prepend_bos = True\n append_eos = False\n use_msa = True\n else:\n raise ValueError(\"Unknown architecture selected\")\n return cls(\n standard_toks, prepend_toks, append_toks, prepend_bos, append_eos, use_msa\n )" }, { "identifier": "RNAConverter", "path": "rhofold/utils/converter.py", "snippet": "class RNAConverter():\n \"\"\"RNA Structure Converter.\"\"\"\n\n def __init__(self):\n \"\"\"\"\"\"\n\n self.eps = 1e-4\n self.__init()\n\n def __init(self):\n \"\"\"\"\"\"\n\n self.cord_dict = defaultdict(dict)\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n for atom_name, _, cord_vals in RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]:\n self.cord_dict[resd_name][atom_name] = torch.tensor(cord_vals, dtype=torch.float32)\n\n trans_dict_all = {}\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n trans_dict = {}\n cord_dict = {}\n\n atom_infos = RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]\n angl_infos = RNA_CONSTANTS.ANGL_INFOS_PER_RESD[resd_name]\n n_angls = len(angl_infos)\n \n for atom_name, idx_rgrp, _ in atom_infos:\n if idx_rgrp == 0:\n cord_dict[atom_name] = self.cord_dict[resd_name][atom_name]\n\n trans_dict['omega-main'] = (torch.eye(3, dtype=torch.float32), torch.zeros((3), dtype=torch.float32))\n trans_dict['phi-main'] = (torch.eye(3, dtype=torch.float32), torch.zeros((3), dtype=torch.float32))\n\n for idx_angl, (angl_name, _, atom_names_sel) in enumerate(angl_infos):\n x1 = cord_dict[atom_names_sel[0]]\n x2 = cord_dict[atom_names_sel[1]]\n x3 = cord_dict[atom_names_sel[2]]\n rot, tsl_vec = calc_rot_tsl(x1, x3, x3 + (x3 - x2))\n trans_dict['%s-main' % angl_name] = (rot, tsl_vec)\n\n for atom_name, idx_rgrp, _ in atom_infos:\n if idx_rgrp == idx_angl + 3:\n cord_dict[atom_name] = tsl_vec + torch.sum(\n rot * self.cord_dict[resd_name][atom_name].view(1, 3), dim=1)\n\n for idx_angl_src in range(1, n_angls - 1):\n idx_angl_dst = idx_angl_src + 1\n angl_name_src = angl_infos[idx_angl_src][0]\n angl_name_dst = angl_infos[idx_angl_dst][0]\n rot_src, tsl_vec_src = trans_dict['%s-main' % angl_name_src]\n rot_dst, tsl_vec_dst = trans_dict['%s-main' % angl_name_dst]\n rot = torch.matmul(rot_src.transpose(1, 0), rot_dst)\n tsl_vec = torch.matmul(rot_src.transpose(1, 0), tsl_vec_dst - tsl_vec_src)\n trans_dict['%s-%s' % (angl_name_dst, angl_name_src)] = (rot, tsl_vec)\n\n trans_dict_all[resd_name] = trans_dict\n\n self.trans_dict_init = trans_dict_all\n\n def build_cords(self, seq, fram, angl, rtn_cmsk=False):\n\n # initialization\n n_resds = len(seq)\n device = angl.device\n\n angl = angl.squeeze(dim=0) / (torch.norm(angl.squeeze(dim=0), dim=2, keepdim=True) + self.eps)\n rigid = Rigid.from_tensor_7(fram, normalize_quats=True)\n fram = rigid.to_tensor_4x4()\n rot = fram[:,:,:3,:3]\n tsl = fram[:,:,:3,3:].permute(0,1,3,2)\n\n fram = torch.cat([rot, tsl], dim=2)[:,:,:4,:3].permute(1,0,2,3)\n fmsk = torch.ones((n_resds, 1), dtype=torch.int8, device=device)\n amsk = torch.ones((n_resds, RNA_CONSTANTS.N_ANGLS_PER_RESD_MAX), dtype=torch.int8, device=device)\n cord = torch.zeros((n_resds, RNA_CONSTANTS.ATOM_NUM_MAX, 3), dtype=torch.float32, device=device)\n cmsk = torch.zeros((n_resds, RNA_CONSTANTS.ATOM_NUM_MAX), dtype=torch.int8, device=device)\n\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n idxs = [x for x in range(n_resds) if seq[x] == resd_name]\n if len(idxs) == 0:\n continue\n cord[idxs], cmsk[idxs] =\\\n self.__build_cord(resd_name, 
fram[idxs], fmsk[idxs], angl[idxs], amsk[idxs])\n\n return (cord, cmsk) if rtn_cmsk else (cord)\n\n def __build_cord(self, resd_name, fram, fmsk, angl, amsk):\n \"\"\"\"\"\"\n\n # initialization\n device = fram.device\n n_resds = fram.shape[0]\n atom_names_all = RNA_CONSTANTS.ATOM_NAMES_PER_RESD[resd_name]\n atom_names_pad = atom_names_all + ['X'] * (RNA_CONSTANTS.ATOM_NUM_MAX - len(atom_names_all))\n atom_infos_all = RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]\n\n cord_dict = defaultdict(\n lambda: torch.zeros((n_resds, 3), dtype=torch.float32, device=device))\n cmsk_vec_dict = defaultdict(lambda: torch.zeros((n_resds), dtype=torch.int8, device=device))\n\n fram_null = torch.tensor(\n [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]], dtype=torch.float32, device=device)\n fram_dict = defaultdict(lambda: fram_null.unsqueeze(dim=0).repeat(n_resds, 1, 1))\n fmsk_vec_dict = defaultdict(lambda: torch.zeros((n_resds), dtype=torch.int8, device=device))\n\n trans_dict = {'main': (fram[:, 0, :3], fram[:, 0, 3])}\n\n rot_curr, tsl_curr = trans_dict['main']\n atom_names_sel = [x[0] for x in atom_infos_all if x[1] == 0]\n for atom_name_sel in atom_names_sel:\n cord_vec = self.cord_dict[resd_name][atom_name_sel].to(device)\n cord_dict[atom_name_sel] = \\\n tsl_curr + torch.sum(rot_curr * cord_vec.view(1, 1, 3), dim=2)\n cmsk_vec_dict[atom_name_sel] = fmsk[:, 0]\n\n # determine 3D coordinates of atoms belonging to side-chain rigid-groups\n angl_infos_all = RNA_CONSTANTS.ANGL_INFOS_PER_RESD[resd_name]\n rgrp_names_all = ['omega', 'phi'] + [x[0] for x in angl_infos_all]\n\n for idx_rgrp, rgrp_name_curr in enumerate(rgrp_names_all):\n if rgrp_name_curr in ['omega', 'phi', 'angl_0', 'angl_1']:\n rgrp_name_prev = 'main'\n else:\n rgrp_name_prev = 'angl_%d' % (int(rgrp_name_curr[-1]) - 1)\n\n rot_prev, tsl_prev = trans_dict[rgrp_name_prev]\n rot_base, tsl_vec_base = \\\n self.trans_dict_init[resd_name]['%s-%s' % (rgrp_name_curr, rgrp_name_prev)]\n rot_base = rot_base.unsqueeze(dim=0).to(device)\n tsl_base = tsl_vec_base.unsqueeze(dim=0).to(device)\n \n rot_addi, tsl_addi = calc_angl_rot_tsl(angl[:, idx_rgrp])\n rot_curr, tsl_curr = merge_rot_tsl(\n rot_prev, tsl_prev, rot_base, tsl_base, rot_addi, tsl_addi)\n trans_dict[rgrp_name_curr] = (rot_curr, tsl_curr)\n\n fram_dict[rgrp_name_curr] = \\\n torch.cat([rot_curr, tsl_curr.unsqueeze(dim=1)], dim=1)\n fmsk_vec_dict[rgrp_name_curr] = fmsk[:, 0] * amsk[:, idx_rgrp]\n\n atom_names_sel = [x[0] for x in atom_infos_all if x[1] == idx_rgrp + 1]\n for atom_name_sel in atom_names_sel:\n cord_vec = self.cord_dict[resd_name][atom_name_sel].to(device)\n\n cord_dict[atom_name_sel] = \\\n tsl_curr + torch.sum(rot_curr * cord_vec.view(1, 1, 3), dim=2)\n cmsk_vec_dict[atom_name_sel] = fmsk_vec_dict[rgrp_name_curr]\n\n cmsk = torch.stack([cmsk_vec_dict[x] for x in atom_names_pad][:RNA_CONSTANTS.ATOM_NUM_MAX], dim=1)\n cord = torch.stack([cord_dict[x] for x in atom_names_pad][:RNA_CONSTANTS.ATOM_NUM_MAX], dim=1)\n\n return cord, cmsk\n\n def export_pdb_file(self, seq, atom_cords, path, atom_masks=None, confidence=None, chain_id=None, logger = None):\n \"\"\"Export a PDB file.\"\"\"\n\n # configurations\n i_code = ' '\n chain_id = '0' if chain_id is None else chain_id\n occupancy = 1.0\n cord_min = -999.0\n cord_max = 999.0\n seq_len = len(seq)\n\n n_key_atoms = RNA_CONSTANTS.ATOM_NUM_MAX\n\n # take all the atom coordinates as valid, if not specified\n if atom_masks is None:\n atom_masks = np.ones(atom_cords.shape[:-1], dtype=np.int8)\n\n # determine the set of atom names (per 
residue)\n if atom_cords.ndim == 2:\n if atom_cords.shape[0] == seq_len * n_key_atoms:\n atom_cords = np.reshape(atom_cords, [seq_len, n_key_atoms, 3])\n atom_masks = np.reshape(atom_masks, [seq_len, n_key_atoms])\n else:\n raise ValueError('atom coordinates\\' shape does not match the sequence length')\n\n elif atom_cords.ndim == 3:\n assert atom_cords.shape[0] == seq_len\n atom_cords = atom_cords\n atom_masks = atom_masks\n\n else:\n raise ValueError('atom coordinates must be a 2D or 3D np.ndarray')\n\n # reset invalid values in atom coordinates\n atom_cords = np.clip(atom_cords, cord_min, cord_max)\n atom_cords[np.isnan(atom_cords)] = 0.0\n atom_cords[np.isinf(atom_cords)] = 0.0\n\n # export the 3D structure to a PDB file\n os.makedirs(os.path.dirname(os.path.realpath(path)), exist_ok=True)\n with open(path, 'w') as o_file:\n n_atoms = 0\n for idx_resd, resd_name in enumerate(seq):\n for idx_atom, atom_name in enumerate(RNA_CONSTANTS.ATOM_NAMES_PER_RESD[resd_name]):\n\n temp_factor = 0.0 if confidence is None else \\\n float(100 * confidence.reshape([seq_len])[idx_resd - 1])\n\n if atom_masks[idx_resd, idx_atom] == 0:\n continue\n n_atoms += 1\n charge = atom_name[0]\n line_str = ''.join([\n 'ATOM ',\n '%5d' % n_atoms,\n ' ' + atom_name + ' ' * (3 - len(atom_name)),\n ' %s' % resd_name,\n ' %s' % chain_id,\n ' ' * (4 - len(str(idx_resd + 1))),\n '%s' % str(idx_resd + 1),\n '%s ' % i_code,\n '%8.3f' % atom_cords[idx_resd, idx_atom, 0],\n '%8.3f' % atom_cords[idx_resd, idx_atom, 1],\n '%8.3f' % atom_cords[idx_resd, idx_atom, 2],\n '%6.2f' % occupancy,\n '%6.2f' % temp_factor,\n ' ' * 10,\n '%2s' % charge,\n '%2s' % ' ',\n ])\n assert len(line_str) == 80, 'line length must be exactly 80 characters: ' + line_str\n o_file.write(line_str + '\\n')\n\n if logger is not None:\n logger.info(f' Export PDB file to {path}')" } ]
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Sequence
from rhofold.model.primitives import Linear, LayerNorm
from rhofold.utils.rigid_utils import Rigid
from rhofold.utils.tensor_utils import (
    dict_multimap,
    permute_final_dims,
    flatten_final_dims,
)
from einops import rearrange
from rhofold.utils.alphabet import RNAAlphabet
from rhofold.utils.converter import RNAConverter
11,019
self.layer_norm = LayerNorm(self.c) def forward(self, s): for l in self.layers: s = l(s) s = self.layer_norm(s) return s class StructureModule(nn.Module): def __init__( self, c_s, c_z, c_ipa, c_resnet, no_heads_ipa, no_qk_points, no_v_points, no_blocks, no_transition_layers, no_resnet_blocks, no_angles, trans_scale_factor, refinenet, **kwargs, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_ipa: IPA hidden channel dimension c_resnet: Angle resnet (Alg. 23 lines 11-14) hidden channel dimension no_heads_ipa: Number of IPA heads no_qk_points: Number of query/key points to generate during IPA no_v_points: Number of value points to generate during IPA no_blocks: Number of structure module blocks no_transition_layers: Number of layers in the single representation transition (Alg. 23 lines 8-9) no_resnet_blocks: Number of blocks in the angle resnet no_angles: Number of angles to generate in the angle resnet trans_scale_factor: Scale of single representation transition hidden dimension epsilon: Small number used in angle resnet normalization inf: Large number used for attention masking """ super(StructureModule, self).__init__() self.c_s = c_s self.c_z = c_z self.c_ipa = c_ipa self.c_resnet = c_resnet self.no_heads_ipa = no_heads_ipa self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.no_blocks = no_blocks self.no_transition_layers = no_transition_layers self.no_resnet_blocks = no_resnet_blocks self.no_angles = no_angles self.trans_scale_factor = trans_scale_factor self.epsilon = 1e-8 self.inf = 1e5 self.default_frames = None self.group_idx = None self.atom_mask = None self.lit_positions = None self.layer_norm_s = LayerNorm(self.c_s) self.layer_norm_z = LayerNorm(self.c_z) self.linear_in = Linear(self.c_s, self.c_s) self.ipa = InvariantPointAttention( self.c_s, self.c_z, self.c_ipa, self.no_heads_ipa, self.no_qk_points, self.no_v_points, inf=self.inf, eps=self.epsilon, ) self.layer_norm_ipa = LayerNorm(self.c_s) self.transition = StructureModuleTransition( self.c_s, self.no_transition_layers, ) self.bb_update = BackboneUpdate(self.c_s) self.angle_resnet = AngleResnet( self.c_s, self.c_resnet, self.no_resnet_blocks, self.no_angles, self.epsilon, ) self.refinenet = RefineNet( **refinenet ) if refinenet.enable else None
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class RefineNet(nn.Module): """""" def __init__(self, dim = 64, is_pos_emb = True, n_layer = 4, enable = True, **kwargs): """Constructor function.""" super().__init__() self.is_pos_emb = is_pos_emb self.alphabet = RNAAlphabet.from_architecture('RNA') self.embed_tokens = nn.Embedding(len(self.alphabet), dim) self.enable = enable if self.is_pos_emb: self.embed_positions = PosEmbedding(4096, dim, self.alphabet.padding_idx) self.refine_layer0 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer1 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer2 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer3 = ResEGNN(corrections=n_layer, dims_in=dim) def forward(self, tokens, cords): """Perform the forward pass. Args: Returns: """ if not self.enable: return cords tokens = tokens[:, 0, :] tokens = tokens.unsqueeze(-1).repeat(1, 1, 23) b, l, n = tokens.shape cords = cords.reshape([b, l, n, 3]) fea = self.embed_tokens(tokens) b, l, n, _ = fea.shape if self.is_pos_emb: fea += self.embed_positions(tokens.reshape(b * l, n)).view(fea.size()) out = self.refine_layer0(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer1(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, n, l, -1]).transpose(1,2) out = self.refine_layer2(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer3(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] cords = cords.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, l * n, 3]) return cords class Swish_(torch.nn.Module): def forward(self, x): return x * x.sigmoid() SiLU = torch.nn.SiLU if hasattr(torch.nn, 'SiLU') else Swish_ class CoorsNorm(torch.nn.Module): def __init__(self, eps=1e-8): super().__init__() self.eps = eps self.fn = torch.nn.LayerNorm(1) def forward(self, coors): norm = coors.norm(dim=-1, keepdim=True) normed_coors = coors / norm.clamp(min=self.eps) phase = self.fn(norm) return phase * normed_coors # classes class EGNN(torch.nn.Module): def __init__( self, dim, m_dim=32, ): super().__init__() ''' # Most of the code in this file is based on egnn-pytorch by lucidrains. 
''' edge_input_dim = (dim * 2) + 1 self.edge_mlp = torch.nn.Sequential( torch.nn.Linear(edge_input_dim, edge_input_dim * 2), SiLU(), torch.nn.Linear(edge_input_dim * 2, m_dim), SiLU() ) self.coors_norm = CoorsNorm() self.node_mlp = torch.nn.Sequential( torch.nn.Linear(dim + m_dim, dim * 2), SiLU(), torch.nn.Linear(dim * 2, dim), ) self.coors_mlp = torch.nn.Sequential( torch.nn.Linear(m_dim, m_dim * 4), SiLU(), torch.nn.Linear(m_dim * 4, 1) ) def forward(self, feats, coors): rel_coors = rearrange(coors, 'b i d -> b i () d') - rearrange(coors, 'b j d -> b () j d') rel_dist = (rel_coors ** 2).sum(dim=-1, keepdim=True) feats_j = rearrange(feats, 'b j d -> b () j d') feats_i = rearrange(feats, 'b i d -> b i () d') feats_i, feats_j = torch.broadcast_tensors(feats_i, feats_j) edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1) m_ij = self.edge_mlp(edge_input) coor_weights = self.coors_mlp(m_ij) coor_weights = rearrange(coor_weights, 'b i j () -> b i j') rel_coors = self.coors_norm(rel_coors) scale_factor = 1 / 50.0 coors_out = torch.einsum('b i j, b i j c -> b i c', coor_weights * scale_factor, rel_coors) + coors m_i = m_ij.sum(dim=-2) node_mlp_input = torch.cat((feats, m_i), dim=-1) node_out = self.node_mlp(node_mlp_input) + feats return node_out, coors_out class ResEGNN(torch.nn.Module): def __init__(self, corrections=4, dims_in=41, **kwargs): super().__init__() self.layers = torch.nn.ModuleList([EGNN(dim=dims_in, **kwargs) for _ in range(corrections)]) def forward(self, amino, geom, is_fea = False, keep_last_cords = None): output = [] for layer in self.layers: geom_init = geom amino, geom = layer(amino, geom) if keep_last_cords is not None: geom[:, -keep_last_cords:] = geom_init[:, -keep_last_cords:] output.append([amino, geom]) return output if is_fea else geom class PosEmbedding(nn.Embedding): """ """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): if padding_idx is not None: num_embeddings_ = num_embeddings + padding_idx + 1 else: num_embeddings_ = num_embeddings super().__init__(num_embeddings_, embedding_dim, padding_idx) self.max_positions = num_embeddings def forward(self, input: torch.Tensor): """Input is expected to be of size [bsz x seqlen].""" mask = input.ne(self.padding_idx).int() positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + self.padding_idx return F.embedding( positions, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden) self.linear_2 = Linear(self.c_hidden, self.c_hidden) self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() 
for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. """ def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) # hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) # ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s) self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = 
self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): z[0] = z[0].cpu() # [*, H, N_res, N_res] a = torch.matmul( permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res] ) a *= math.sqrt(1.0 / (3 * self.c_hidden)) a += (math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))) # [*, N_res, N_res, H, P_q, 3] pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5) pt_att = pt_att ** 2 # [*, N_res, N_res, H, P_q] pt_att = sum(torch.unbind(pt_att, dim=-1)) head_weights = self.softplus(self.head_weights).view( *((1,) * len(pt_att.shape[:-2]) + (-1, 1)) ) head_weights = head_weights * math.sqrt( 1.0 / (3 * (self.no_qk_points * 9.0 / 2)) ) pt_att = pt_att * head_weights # [*, N_res, N_res, H] pt_att = torch.sum(pt_att, dim=-1) * (-0.5) # [*, N_res, N_res] square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2) square_mask = self.inf * (square_mask - 1) # [*, H, N_res, N_res] pt_att = permute_final_dims(pt_att, (2, 0, 1)) a = a + pt_att a = a + square_mask.unsqueeze(-3) a = self.softmax(a) # [*, N_res, H, C_hidden] o = torch.matmul( a, v.transpose(-2, -3).to(dtype=a.dtype) ).transpose(-2, -3) # [*, N_res, H * C_hidden] o = flatten_final_dims(o, 2) # [*, H, 3, N_res, P_v] o_pt = torch.sum( ( a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :] ), dim=-2, ) # [*, N_res, H, P_v, 3] o_pt = permute_final_dims(o_pt, (2, 0, 3, 1)) o_pt = r[..., None, None].invert_apply(o_pt) # [*, N_res, H * P_v] o_pt_norm = flatten_final_dims( torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.eps), 2 ) # [*, N_res, H * P_v, 3] o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3) if(_offload_inference): z[0] = z[0].to(o_pt.device) # [*, N_res, H, C_z] o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype)) # [*, N_res, H * C_z] o_pair = flatten_final_dims(o_pair, 2) # [*, N_res, C_s] s = self.linear_out( torch.cat( (o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1 ).to(dtype=z[0].dtype) ) return s class BackboneUpdate(nn.Module): """ Implements part of Algorithm 23. 
""" def __init__(self, c_s): """ Args: c_s: Single representation channel dimension """ super(BackboneUpdate, self).__init__() self.c_s = c_s self.linear = Linear(self.c_s, 6) def forward(self, s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: [*, N_res, C_s] single representation Returns: [*, N_res, 6] update vector """ # [*, 6] update = self.linear(s) return update class StructureModuleTransitionLayer(nn.Module): def __init__(self, c): super(StructureModuleTransitionLayer, self).__init__() self.c = c self.linear_1 = Linear(self.c, self.c) self.linear_2 = Linear(self.c, self.c) self.linear_3 = Linear(self.c, self.c) self.relu = nn.ReLU() def forward(self, s): s_initial = s s = self.linear_1(s) s = self.relu(s) s = self.linear_2(s) s = self.relu(s) s = self.linear_3(s) s = s + s_initial return s class StructureModuleTransition(nn.Module): def __init__(self, c, num_layers): super(StructureModuleTransition, self).__init__() self.c = c self.num_layers = num_layers self.layers = nn.ModuleList() for _ in range(self.num_layers): l = StructureModuleTransitionLayer(self.c) self.layers.append(l) self.layer_norm = LayerNorm(self.c) def forward(self, s): for l in self.layers: s = l(s) s = self.layer_norm(s) return s class StructureModule(nn.Module): def __init__( self, c_s, c_z, c_ipa, c_resnet, no_heads_ipa, no_qk_points, no_v_points, no_blocks, no_transition_layers, no_resnet_blocks, no_angles, trans_scale_factor, refinenet, **kwargs, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_ipa: IPA hidden channel dimension c_resnet: Angle resnet (Alg. 23 lines 11-14) hidden channel dimension no_heads_ipa: Number of IPA heads no_qk_points: Number of query/key points to generate during IPA no_v_points: Number of value points to generate during IPA no_blocks: Number of structure module blocks no_transition_layers: Number of layers in the single representation transition (Alg. 23 lines 8-9) no_resnet_blocks: Number of blocks in the angle resnet no_angles: Number of angles to generate in the angle resnet trans_scale_factor: Scale of single representation transition hidden dimension epsilon: Small number used in angle resnet normalization inf: Large number used for attention masking """ super(StructureModule, self).__init__() self.c_s = c_s self.c_z = c_z self.c_ipa = c_ipa self.c_resnet = c_resnet self.no_heads_ipa = no_heads_ipa self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.no_blocks = no_blocks self.no_transition_layers = no_transition_layers self.no_resnet_blocks = no_resnet_blocks self.no_angles = no_angles self.trans_scale_factor = trans_scale_factor self.epsilon = 1e-8 self.inf = 1e5 self.default_frames = None self.group_idx = None self.atom_mask = None self.lit_positions = None self.layer_norm_s = LayerNorm(self.c_s) self.layer_norm_z = LayerNorm(self.c_z) self.linear_in = Linear(self.c_s, self.c_s) self.ipa = InvariantPointAttention( self.c_s, self.c_z, self.c_ipa, self.no_heads_ipa, self.no_qk_points, self.no_v_points, inf=self.inf, eps=self.epsilon, ) self.layer_norm_ipa = LayerNorm(self.c_s) self.transition = StructureModuleTransition( self.c_s, self.no_transition_layers, ) self.bb_update = BackboneUpdate(self.c_s) self.angle_resnet = AngleResnet( self.c_s, self.c_resnet, self.no_resnet_blocks, self.no_angles, self.epsilon, ) self.refinenet = RefineNet( **refinenet ) if refinenet.enable else None
self.converter = RNAConverter(
7
2023-11-01 10:29:08+00:00
16k
tylerlight071/Project-Cipher
main.py
[ { "identifier": "clear_terminal", "path": "components/common_functions.py", "snippet": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')" }, { "identifier": "print_slow", "path": "components/common_functions.py", "snippet": "def print_slow(text, delay=0.00): # change to 0.01\n for char in text:\n print(char, end='', flush=True)\n time.sleep(delay)\n print()" }, { "identifier": "shop_help", "path": "components/common_functions.py", "snippet": "def shop_help():\n print_slow(Fore.YELLOW + \"Shop Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[buy] - Use the 'buy [upgrade]' command to purchase the upgrade in the shop. \")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the main terminal.\")\n print_slow(\"\")" }, { "identifier": "help_user", "path": "components/common_functions.py", "snippet": "def help_user():\n print_slow(Fore.MAGENTA + \"Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[connect] - Use the 'connect' command to hack into Enigma Corps network.\")\n print_slow(\"\")\n print_slow(\"[mail] - Use the 'mail' command to view and respond to emails from your client and other characters.\")\n print_slow(\"\")\n print_slow(\"[balance] - Use the 'balance' command to view your current earnings which you can spend on upgrades. \")\n print_slow(\"\")\n print_slow(\"[shop] - Use the 'shop' command to view upgrades available in the shop. \")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[help] - Use the 'help' command if you need assistance at any time.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the Main Menu.\")\n print_slow(\"\")" }, { "identifier": "connect_help", "path": "components/common_functions.py", "snippet": "def connect_help():\n print_slow(Fore.MAGENTA + \"Connect Help:\" + Style.RESET_ALL)\n print_slow(\n \"[scan] - Use the 'scan' command to scan the network and search for available systems and vulnerabilities.\")\n print_slow(\"\")\n print_slow(\"[hack] - Use the 'hack [system/vulnerability]' to hack into different systems.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[disconnect] - Use the 'disconnect' command to disconnect from the current system or vulnerability.\")\n print_slow(\"\")" }, { "identifier": "mail_help", "path": "components/common_functions.py", "snippet": "def mail_help():\n print_slow(Fore.LIGHTBLUE_EX + \"Mail Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[l] - Use the 'l' command to list all emails.\")\n print_slow(\"\")\n print_slow(\"[r] - Use the 'r [subject]' command to read an email with the specified subject.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the main terminal.\")\n print_slow(\"\")" }, { "identifier": "system_help", "path": "components/common_functions.py", "snippet": "def system_help():\n print_slow(\"\")\n print_slow(\"[mail] - Use the 'mail' command to log into the users emails.\")\n print_slow(\"\")\n print_slow(\"[l] - Use the 'l' command to list files in a users system.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[r] - Use the 'r 
[file]' command to read files in a users system\")\n print_slow(\"\")" }, { "identifier": "intro_call", "path": "conversations/calls.py", "snippet": "def intro_call():\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Welcome, Cipher. Operation Enigma is our covert mission against Enigma Corp, a powerful and secretive entity.\")\n print_slow(\n \"Your skills and secrecy have brought you to our attention. Your mission is to dig through their systems and servers looking for valuable data.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"Got it, Anonymous. Exposing secrets and bringing justice. I'm in.\")\n print_slow(\"What's my first move? Talk to me about this 'EnigmaLink'.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Excellent, Cipher. EnigmaLink is a specialized tool available on the Hacker's Market. It contains a hidden backdoor, allowing access to Enigma Corps servers.\")\n print_slow(\n \"Your task is to acquire EnigmaLink and initiate your infiltration. Use the 'connect' command to navigate the network and gather crucial intelligence.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"EnigmaLink, got it. I'll secure it and initiate the infiltration. What about this employee, Amy?\")\n print_slow(\"You mentioned her password is 'sexinthecity.' What's my objective with her?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Good question, Cipher. Amy is a key target. Use her password to access her computer and gather any pertinent information.\")\n print_slow(\n \"This data is vital to our cause. Be thorough and meticulous in your investigation. The success of our operation depends on it.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"Understood, Anonymous. I'll focus on Amy, gather intel, and proceed with precision.\")\n print_slow(\"Consider it done. Anything else I should know before I dive in?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"One last thing, Cipher. All collected data is highly confidential. This contract is binding, and your success is paramount.\")\n print_slow(\"Execute with diligence, and may the odds be in your favor. 
Good luck, Cipher.\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "first_call", "path": "conversations/calls.py", "snippet": "def first_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"That's a good start, but we already have that information.\")\n print_slow(\"Regardless, I've transferred £20 into the account for your troubles.\")\n print_slow(\"Keep digging Cipher!\" + Style.RESET_ALL)\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "second_call", "path": "conversations/calls.py", "snippet": "def second_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Hey Cipher, you nailed it! 'Billy' just spilled the beans about wanting to climb the corporate ladder into management.\")\n print_slow(\n \"This is gold for us. We can guide 'Billy' toward training and workshops that align with our interests, nudging things in our favor.\")\n print_slow(\n \"Picture it – we're pulling the strings, helping 'Billy' grow, and steering the ship where we want it to go.\")\n print_slow(\"Keep the ball rolling, Cipher!\" + Style.RESET_ALL)\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "third_call", "path": "conversations/calls.py", "snippet": "def third_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\"\n \"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've stumbled upon a perplexing development regarding Enigma's interest in a mysterious 'compound.'\")\n print_slow(\n \"I'm cross-referencing our existing intel to unveil more details. Stay vigilant and be prepared for the unknown.\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"A compound, huh? Any hints on whether we're talking metal, chemicals, or something else entirely?\")\n print_slow(\"This feels like navigating in the dark. What exactly am I dealing with?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Response\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW +\n \"Cipher, we're in the dark too. Initial reports are unclear—could be metal, chemical, or something beyond our comprehension.\")\n print_slow(\n \"Your mission is to identify the nature of this compound. Exercise extreme caution; this goes deeper than we anticipated.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Inquiry\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"So, we're playing 'guess the compound.' 
Any leads, any connections I should explore?\")\n print_slow(\"This is starting to sound like one of those high-stakes puzzles.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Clarification\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW +\n \"I wish I had more details, Cipher. This is uncharted territory for us. Investigate discreetly, and trust no one.\")\n print_slow(\n \"I'll attempt to gather more intel. Stay on the line, and keep me updated on any findings.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "fourth_call", "path": "conversations/calls.py", "snippet": "def fourth_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've got our hands on an intriguing document – an Employee Performance Review for 'Billy Constantine'.\")\n print_slow(\n \"This could be a goldmine of information. Let's dig in and see if there's anything we can leverage to our advantage.\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"An Employee Performance Review? Interesting choice. What's the scoop on 'Billy Constantine'?\")\n print_slow(\"Give me the details, and we'll figure out our next move.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Briefing\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Cipher, 'Billy Constantine' is making waves. The review highlights exceptional performance as a sales representative.\")\n print_slow(\n \"He's exceeding sales targets, mentoring new team members, and earning a solid 4.5/5 rating. A rising star, it seems.\")\n print_slow(\"We might use this to our advantage. Let's explore how we can align his ambitions with our agenda.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Strategy\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"A high-performing sales rep, huh? We could steer 'Billy' towards projects that align with our goals.\")\n print_slow(\"Let's use this performance review to our advantage. Maybe mentorship programs, leadership initiatives?\")\n print_slow(\"I'm ready to play this card strategically. What's the next move?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Next Steps\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Great thinking, Cipher. Let's work on a plan to subtly guide 'Billy' toward initiatives that benefit us.\")\n print_slow(\"We'll need to dig deeper into 'Billy's' aspirations and weave our influence seamlessly.\")\n print_slow(\"Stay vigilant, Cipher. 
This could be a game-changer.\")\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "fifth_call", "path": "conversations/calls.py", "snippet": "def fifth_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've intercepted some Meeting Minutes dated 24/06/2025. It's related to 'Project X' and involves key players.\")\n print_slow(\n \"This could be our chance to uncover more about Enigma's activities. Let's dive into the details and see what we can extract.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"Meeting Minutes, huh? 'Project X' sounds intriguing. Who were the players involved, and what's the agenda?\")\n print_slow(\"I'm ready to dissect this information and uncover any hidden gems.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Briefing\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Cipher, the meeting involved key personnel—Amy, Billy, Kyle, and others. 'Project X' is on the agenda, and there's mention of sensitive materials.\")\n print_slow(\n \"This could be a crucial insight into Enigma's plans. Let's analyze the action items and plan our next move.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Analysis\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"'Project X,' sensitive materials, and action items. This is a goldmine of information.\")\n print_slow(\n \"Let's focus on dissecting the action items and see if we can connect the dots. What's our strategy, Anonymous?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Next Steps\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Agreed, Cipher. Let's delve into the action items, especially the data compilation and safety protocol training.\")\n print_slow(\"We might uncover more about 'Project X' and gain insights into Enigma's plans.\")\n print_slow(\"Stay sharp, Cipher. 
This could be a pivotal moment in our mission.\")\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "sixth_call", "path": "conversations/calls.py", "snippet": "def sixth_call():\n print_slow(\"ADD CALL STUFF HERE\")" }, { "identifier": "markus_seen_call", "path": "conversations/calls.py", "snippet": "def markus_seen_call():\n print_slow(\"Something goes here\")" }, { "identifier": "code_shatter_call", "path": "conversations/minigame_calls.py", "snippet": "def code_shatter_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"I see you have bought CodeShatter!\")\n print_slow(\"This item is a one time use upgrade so once you get the password, it is gone so use wisely!\")\n print_slow(\"But don't threat, if you fail, you get a chance to retry. The item is only used when you get the password, so be sure to write it down!\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "code_shatter_minigame", "path": "minigames/code_shatter_minigame.py", "snippet": "def code_shatter_minigame():\n # Generate a random 5-digit number\n target = [str(random.randint(1, 9)) for _ in range(5)]\n\n print_slow(\"Welcome to CodeShatter!\")\n print_slow(\"\")\n print_slow(\"Guess the 5-digit number.\")\n print_slow(\"\")\n print_slow(\"The sequence can contain multiple same numbers\")\n print_slow(\"\")\n print_slow(Fore.GREEN + \"Green: Correct digit in correct position.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"Orange: Correct digit in incorrect position.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.RED + \"Red: Incorrect digit.\" + Style.RESET_ALL)\n print_slow(\"\")\n\n attempts = 0\n while attempts < 7:\n # Get the user's guess\n guess = input(\"Enter your guess: \")\n\n if len(guess) != 5 or not guess.isdigit():\n print_slow(\"Invalid input. Please enter a 5-digit number.\")\n continue\n\n attempts += 1\n\n # Check the guess against the target\n feedback = []\n for i in range(5):\n if guess[i] == target[i]:\n feedback.append(Fore.GREEN + guess[i] + Style.RESET_ALL)\n elif guess[i] in target:\n feedback.append(Fore.YELLOW + guess[i] + Style.RESET_ALL)\n else:\n feedback.append(Fore.RED + guess[i] + Style.RESET_ALL)\n\n print_slow(\"Feedback: \" + \" \".join(feedback))\n\n # Check if the guess is correct\n if guess == \"\".join(target):\n print_slow(Fore.GREEN + \"Access granted.\" + Style.RESET_ALL)\n break\n else:\n print_slow(Fore.RED + \"Access denied. 
Too many attempts.\" + Style.RESET_ALL)\n time.sleep(1)\n print_slow(\"\")\n print_slow(Fore.RED + \"Rebooting CodeShatter with new proxy...\" + Style.RESET_ALL)\n time.sleep(1)\n clear_terminal()\n code_shatter_minigame()" }, { "identifier": "port_scanning", "path": "minigames/eye_spy_minigame.py", "snippet": "def port_scanning():\n num_ports = 10\n open_ports, closed_ports = generate_ports(num_ports)\n attempts = 5\n correct_guesses = 0\n scan_attempts = 2\n\n print_slow(\"Welcome to the Port Scanning minigame!\")\n print_slow(\"\")\n print_slow(f\"Find the open ports in the range 1-{num_ports}.\")\n print_slow(\"\")\n print_slow(f\"You have {attempts} attempts.\")\n print_slow(\"\")\n\n while scan_attempts > 0:\n print_slow(\"\")\n print_slow(f\"\\nYou have {scan_attempts} scan attempts left.\")\n print_slow(\"\")\n start = int(input(\"Enter the start of the range to scan: \"))\n print_slow(\"\")\n end = int(input(\"Enter the end of the range to scan: \"))\n print_slow(\"\")\n\n num_open_ports_in_range = len(open_ports.intersection(range(start, end + 1)))\n print_slow(\"\")\n print_slow(f\"There are {num_open_ports_in_range} open ports in the range {start}-{end}.\")\n\n scan_attempts -= 1\n\n while attempts > 0 and len(open_ports) > 0:\n port = int(input(\"\\nEnter a port number to guess: \"))\n\n if port in open_ports:\n print_slow(Fore.GREEN + \"Port is open!\" + Style.RESET_ALL)\n open_ports.remove(port)\n correct_guesses += 1\n elif port in closed_ports:\n print_slow(Fore.RED + \"Port is closed.\" + Style.RESET_ALL)\n closed_ports.remove(port)\n else:\n print_slow(\"Invalid port number. Please enter a number between 1 and\", num_ports)\n\n attempts -= 1\n\n if len(open_ports) == 0:\n print_slow(\n Fore.GREEN + \"\\nCongratulations! You have successfully found all the open ports and gained access to the camera.\" + Style.RESET_ALL)\n time.sleep(2)\n clear_terminal()\n else:\n print_slow(\n Fore.RED + f\"\\nHack Failed! You found {correct_guesses} out of {len(open_ports) + correct_guesses} open ports.\" + Style.RESET_ALL)\n time.sleep(1)\n clear_terminal()\n port_scanning()" }, { "identifier": "AmySystem", "path": "systems/level_1/amy/amy_system.py", "snippet": "class AmySystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"return_to_work_form.txt\",\n \"content\": (\n \"Employee Name: _______________\\n\"\n \"Employee ID: ____________\\n\"\n \"Department: _______________\\n\"\n \"Date of Return: ______\\n\\n\"\n \"I, [Employee Name], certify that I have followed the company's \"\n \"guidelines for returning to work after an absence. \"\n \"I understand that it is my responsibility to adhere to all safety \"\n \"protocols and procedures to ensure the health and well-being of my \"\n \"colleagues and myself.\\n\\n\"\n \"I acknowledge that I have completed any necessary training and have \"\n \"been briefed on any updates to the company's policies and procedures. \"\n \"I am aware that I must report any symptoms or exposure to COVID-19 to \"\n \"my supervisor immediately.\\n\\n\"\n \"I am committed to doing my part to maintain a safe and healthy work \"\n \"environment for everyone. I will continue to follow all guidelines \"\n \"and protocols and will cooperate with any additional measures that \"\n \"may be implemented in the future.\\n\\n\"\n \"Signature: [Employee Signature]\\n\"\n \"Date: [Date]\"\n )\n },\n {\n \"name\": \"employee_handbook.txt\",\n \"content\": (\n \"Welcome to Enigma Corps We are thrilled to have you as part of our \"\n \"team. 
This employee handbook has been designed to help you understand \"\n \"our company's policies, procedures, and expectations.\\n\\n\"\n \"Our company is committed to fostering a positive and inclusive work \"\n \"environment where all employees feel valued and supported. We believe \"\n \"in treating everyone with respect and dignity and expect all employees \"\n \"to do the same.\\n\\n\"\n \"In this handbook, you will find information on topics such as:\\n\\n\"\n \"- Code of Conduct\\n\"\n \"- Dress Code\\n\"\n \"- Attendance and Punctuality\\n\"\n \"- Time Off and Leave Policies\\n\"\n \"- Performance Evaluations\\n\"\n \"- Health and Safety\\n\"\n \"- Equal Employment Opportunity\\n\"\n \"- Harassment and Discrimination\\n\\n\"\n \"Please take the time to read through this handbook carefully and \"\n \"familiarize yourself with our policies and procedures. If you have any \"\n \"questions or concerns, do not hesitate to reach out to your supervisor \"\n \"or the HR department.\\n\\n\"\n \"We look forward to working with you and hope you have a long and \"\n \"successful career with Enigma Corps!\"\n )\n },\n {\n \"name\": \"benefits_summary.txt\",\n \"content\": (\n \"At Enigma Corps, we believe in taking care of our employees and \"\n \"offer a comprehensive benefits package to support your health, well-being, \"\n \"and financial security. Below is a summary of the benefits available to \"\n \"you as an employee of Enigma Corps.\\n\\n\"\n \"Health Insurance: We offer a choice of medical, dental, and vision \"\n \"plans to meet your needs. Our plans provide coverage for preventive care, \"\n \"hospitalization, prescription drugs, and more.\\n\\n\"\n \"Retirement Savings: We offer a 401(k) plan with a generous company \"\n \"match to help you save for your future. You can choose from a variety of \"\n \"investment options to suit your needs.\\n\\n\"\n \"Paid Time Off: We provide a generous amount of paid time off, \"\n \"including vacation, sick leave, and holiday pay. We also offer paid \"\n \"parental leave for new parents.\\n\\n\"\n \"Flexible Work Arrangements: We understand the importance of work-life \"\n \"balance and offer flexible work arrangements, such as remote work and \"\n \"flexible schedules, where possible.\\n\\n\"\n \"Wellness Programs: We offer a variety of wellness programs and \"\n \"resources to support your physical and mental health, including fitness \"\n \"classes, stress management programs, and counseling services.\\n\\n\"\n \"Professional Development: We are committed to supporting your growth \"\n \"and development and offer a variety of training and development \"\n \"opportunities, including tuition reimbursement, workshops, and seminars.\"\n \"\\n\\n\"\n \"We encourage you to review this summary carefully and take advantage of \"\n \"the benefits available to you. If you have any questions or need further \"\n \"information, please contact the HR department.\"\n )\n },\n ]\n self.emails = [\n {\n \"sender\": \"Amy\",\n \"subject\": \"Can't Stop Thinking About You\",\n \"body\": (\n \"Hey Billy,\\n\\n\"\n \"I hope this message finds you in good spirits. I've been meaning to write to you for a while now, but I couldn't find the right words to express what I've been feeling.\\n\\n\"\n \"Ever since that night we spent together, I can't seem to get you out of my mind. There's something about the way you make me feel that I've never experienced before. 
\"\n \"\\nIt's exhilarating, yet terrifying all at the same time.\\n\\n\"\n \"I know we both have a lot on our plates right now, and I don't want to add any more stress to your life. But I can't help but wonder what could happen if we gave this a real shot. \"\n \"I know it's complicated, and there are a lot of factors to consider, but I think we owe it to ourselves to explore this connection we have.\\n\\n\"\n \"I understand if you're not ready to take that step, and I don't want to pressure you into anything you're not comfortable with. \"\n \"\\nBut I can't shake the feeling that we could have something truly special together.\\n\\n\"\n \"I'd love to hear your thoughts on this, and I'm more than willing to take things slow if that's what you need. Maybe we could meet up for dinner and talk about it in person?\"\n \" I think it would be easier to have this conversation face-to-face.\\n\\n\"\n \"I hope you're doing well, and I look forward to hearing from you soon.\\n\\n\"\n \"Take care,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Need Your Help on the Smith Project\",\n \"body\": (\n \"Hi Billy,\\n\\n\"\n \"I hope this email finds you well. I wanted to reach out and ask for your help on the Smith project. I've been having some trouble with the data analysis portion,\"\n \"\\nand I know you have a lot of experience in that area.\\n\\n\"\n \"The project involves analyzing customer feedback data to identify trends and areas for improvement. I've been working on it for a few weeks now, but I'm finding it challenging to make sense of the data and\"\n \"\\ndraw meaningful conclusions.\\n\\n\"\n \"Would you be available for a quick meeting later this week to go over some of the data with me? I would really appreciate your input and guidance on this. \"\n \"\\nI think your expertise could really help me make progress and ensure the success of the project.\\n\\n\"\n \"If you're available, please let me know your preferred date and time, and I'll send out a calendar invite. I'm flexible and can work around your schedule.\\n\\n\"\n \"Thank you in advance for your help, and I look forward to hearing from you soon.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Request for Time Off\",\n \"body\": (\n \"Good Afternoon Katie,\\n\\n\"\n \"I hope this email finds you well. I wanted to request some time off next month for a family vacation. I am planning to be out of the office from 10/09/2024 to 18/09/2024\\n\\n\"\n \"I have been working hard on the Johnson project and have made significant progress. I will make sure to finish up any outstanding work and hand off any ongoing projects to my colleagues before I leave. I will also be available by email in case of any urgent matters.\\n\\n\"\n \"I understand that this is a busy time for the team, and I want to ensure that my absence doesn't cause any disruptions. I have already spoken to Markus and he has kindly agreed to cover for me while I'm away.\\n\\n\"\n \"Thank you for considering my request. I look forward to spending some quality time with my family and coming back to work refreshed and recharged.\"\n \"\\nI am confident that the time off will help me come back with renewed energy and focus.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Apology for the Mistake\",\n \"body\": (\n \"Good Morning Kyle,\\n\\n\"\n \"I hope this email finds you well. I wanted to reach out and apologize for the mistake I made on the Johnson report. 
I realize now that I overlooked some important data, and I take full responsibility for it.\\n\\n\"\n \"I have gone back and corrected the report, and I will make sure to double-check my work in the future to avoid any similar mistakes. I have also attached the updated report for your reference.\\n\\n\"\n \"I understand if you are disappointed or frustrated, and I am more than willing to do whatever it takes to make it right. Please let me know if there's anything else I can do to fix this,\"\n \"\\nor if you would like to discuss this further.\\n\\n\"\n \"Once again, I am truly sorry for the mistake, and I appreciate your understanding. I value our working relationship and hope that this incident doesn't tarnish it. I am committed to making amends and ensuring that this doesn't happen again in the future.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Thank You for Letting Me Use Your Computer\",\n \"body\": (\n \"Hey Billy,\\n\\n\"\n \"I wanted to take a moment to express my gratitude for allowing me to use your computer while mine was being serviced by IT. \"\n \"It was a huge help and allowed me to stay productive during that time.\\n\\n\"\n \"I also noticed that your password is 'football'. While I understand it's easy to remember, it's important to choose a more secure password to protect your accounts.\"\n \"\\nI would recommend changing it to something more complex and unique. You never know who's watching after all.\\n\\n\"\n \"Thanks again for your generosity and understanding.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n }\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" }, { "identifier": "BillySystem", "path": "systems/level_1/billy/billy_system.py", "snippet": "class BillySystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"cover_letter.txt\",\n \"content\": (\n \"Dear Hiring Manager,\\n\\n\"\n \"I am writing to express my interest in the management position at Enigma Corps. \"\n \"I have been with the company for over 7 years and have consistently demonstrated my commitment to driving excellence and fostering collaboration within the team.\\n\\n\"\n \"During my tenure at Enigma Corps, I have been involved in various projects, including the successful completion of the Q3 deliverables project, where I played a key role in the planning and execution stages. \"\n \"My dedication to achieving project milestones and my ability to work under pressure make me a strong candidate for a management role.\\n\\n\"\n \"I possess strong leadership skills, which I have honed through my experiences in leading teams and coordinating cross-functional efforts. 
\"\n \"My ability to communicate effectively and build positive relationships with team members and stakeholders has resulted in successful project outcomes and increased productivity.\\n\\n\"\n \"In addition to my technical and leadership skills, I am also committed to continuous learning and professional development. \"\n \"I have participated in various training programs and workshops to enhance my management skills and stay up-to-date with industry trends and best practices.\\n\\n\"\n \"I am excited about the opportunity to contribute to the growth and success of Enigma Corps as a member of the management team. \"\n \"I am confident that my skills and experience will be valuable assets to the company, and I look forward to the opportunity to work closely with the team to drive innovation and excellence.\\n\\n\"\n \"Thank you for considering my application. I am looking forward to the opportunity to discuss my qualifications further and explore how I can contribute to the success of Enigma Corps.\\n\\n\"\n \"Sincerely,\\n\"\n \"Billy Constantine\\n\"\n )\n },\n {\n \"name\": \"employee_handbook.txt\",\n \"content\": (\n \"Welcome to Enigma Corps We are thrilled to have you as part of our \"\n \"team. This employee handbook has been designed to help you understand \"\n \"our company's policies, procedures, and expectations.\\n\\n\"\n \"Our company is committed to fostering a positive and inclusive work \"\n \"environment where all employees feel valued and supported. We believe \"\n \"in treating everyone with respect and dignity and expect all employees \"\n \"to do the same.\\n\\n\"\n \"In this handbook, you will find information on topics such as:\\n\\n\"\n \"- Code of Conduct\\n\"\n \"- Dress Code\\n\"\n \"- Attendance and Punctuality\\n\"\n \"- Time Off and Leave Policies\\n\"\n \"- Performance Evaluations\\n\"\n \"- Health and Safety\\n\"\n \"- Equal Employment Opportunity\\n\"\n \"- Harassment and Discrimination\\n\\n\"\n \"Please take the time to read through this handbook carefully and \"\n \"familiarize yourself with our policies and procedures. If you have any \"\n \"questions or concerns, do not hesitate to reach out to your supervisor \"\n \"or the HR department.\\n\\n\"\n \"We look forward to working with you and hope you have a long and \"\n \"successful career with Enigma Corps!\"\n )\n },\n {\n \"name\": \"meeting_minutes.txt\",\n \"content\": (\n \"Meeting Minutes\\n\\n\"\n \"Date: 24/06/2025\\n\"\n \"Location: REDACTED\\n\"\n \"Attendees: Amy, REDACTED, Billy, Kyle, REDACTED, REDACTED, REDACTED\\n\\n\"\n \"Agenda:\\n\"\n \"- Discuss progress on Project REDACTED\\n\"\n \"- Review safety protocols for handling sensitive materials\\n\"\n \"- Plan next steps for research and development\\n\\n\"\n \"Action Items:\\n\"\n \"- Compile data from recent experiments and share with team\\n\"\n \"- Schedule training session on updated safety protocols\\n\"\n \"- Develop timeline for next phase of Project X\\n\\n\"\n \"Next Meeting: 05/08/24, 12:00pm\\n\"\n )\n },\n {\n \"name\": \"employee_performance_review.txt\",\n \"content\": (\n \"Employee Performance Review\\n\\n\"\n \"Employee Name: Billy Constantine\\n\"\n \"Employee ID: 035854\\n\"\n \"Review Date: 28/06/2024\\n\\n\"\n \"Performance Summary:\\n\"\n \"Billy has demonstrated exceptional performance in his role as a sales representative. 
He has consistently exceeded sales targets, built strong relationships with clients, and demonstrated leadership qualities in team meetings and projects.\\n\\n\"\n \"Strengths:\\n\"\n \"- Exceeded quarterly sales targets by 15%.\\n\"\n \"- Successfully onboarded and mentored two new team members.\\n\"\n \"- Demonstrated excellent communication and negotiation skills.\\n\\n\"\n \"Areas for Improvement:\\n\"\n \"- Time management skills can be further developed to ensure all tasks are completed in a timely manner.\\n\"\n \"- Continued development of technical knowledge to stay up-to-date with industry trends.\\n\"\n \"- Strengthen collaboration with cross-functional teams to drive more integrated solutions.\\n\\n\"\n \"Goals for Next Review Period:\\n\"\n \"- Increase sales targets by 20%.\\n\"\n \"- Complete a management training program.\\n\"\n \"- Improve time management skills through prioritization and delegation.\\n\\n\"\n \"Overall Rating: 4.5/5\\n\"\n \"Reviewer Name: Katie Thompson\\n\"\n \"Reviewer Signature: Katie Thompson\\n\"\n \"Date: 28/06/2024\\n\"\n )\n }\n ]\n self.emails = [\n\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Need Your Help on the Smith Project\",\n \"body\": (\n \"Hi Amy,\\n\\n\"\n \"I hope this message finds you in great spirits! I'm more than happy to lend a helping hand with the Smith project. After all, two heads are better than one, especially when it comes to data analysis, right?\\n\\n\"\n \"How about we grab a coffee and chat about the project in person? I think it would be nice to catch up and discuss the data over a cup of joe. I'm sure we can brainstorm some ideas and come up with a game plan together.\\n\\n\"\n \"I'm free [date] at [time], does that work for you? If not, just let me know your availability, and we can find a time that suits us both. I'm really looking forward to our coffee date and tackling the project together.\\n\\n\"\n \"Can't wait to see you and dive into the data!\\n\\n\"\n \"Best,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Project Update\",\n \"body\": (\n \"Hello Team,\\n\\n\"\n \"I wanted to provide everyone with a quick update on our progress with the Q3 deliverables project. We've successfully completed the initial research phase and are now moving into the planning stage.\\n\\n\"\n \"In our last meeting, we discussed the following key points:\\n\"\n \"- Compound Analysis: We've identified a unique compound with potential applications in various industries. Further testing and analysis are required to unlock its full potential.\\n\"\n \"- Resource Management: We've allocated a special team and dedicated resources to handle the delicate nature of this project, ensuring utmost confidentiality and security.\\n\"\n \"- Safety Protocols: We've developed strict safety protocols to handle the compound, and we're conducting regular training sessions to ensure compliance.\\n\\n\"\n \"Our next steps include finalizing the project plan, assigning tasks to team members, and setting deadlines. I would appreciate input and feedback from all team members to ensure we're on the right track. Please review the attached project plan document for more details.\\n\\n\"\n \"Additionally, I want to remind everyone of the confidential nature of this project. It's imperative that we maintain discretion and follow all security protocols to safeguard our work. 
Let's work together to make this project a success and uphold the company's reputation for innovation and excellence.\\n\\n\"\n \"If you have any questions or concerns, please don't hesitate to reach out. Your cooperation and commitment to this project are greatly appreciated.\\n\\n\"\n \"Best regards,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Can't Stop Thinking About You\",\n \"body\": (\n \"Hey there, Amy,\\n\\n\"\n \"Wow, your message really caught me by surprise! But in the best way possible, of course. I've been trying to play it cool, but I have to admit, I've been thinking about that night a lot too. There was just something electric in the air, wasn't there?\\n\\n\"\n \"I've been tossing and turning, wondering if I should reach out to you or if I should wait for you to make the first move. I guess you beat me to it, and I'm glad you did. It's like you read my mind.\\n\\n\"\n \"I can't deny that there's a certain chemistry between us, and I'm intrigued to see where it could lead. I agree that our lives are complicated, and we don't want to add more stress to each other's plates. But sometimes, taking a risk is what makes life exciting, don't you think?\\n\\n\"\n \"I don't want to rush things or make you feel pressured in any way. I'm more than happy to take things slow and let them unfold naturally. But I can't help but imagine the possibilities if we give this a real shot. We could have something truly special, and I don't want to let that pass us by.\\n\\n\"\n \"How about we meet up for dinner and drinks next week? We can talk about it more and see where the night takes us. I think it would be a fun and relaxed way to get to know each other better and explore this connection we have. What do you say?\\n\\n\"\n \"I hope you're doing well, and I'm eagerly awaiting your reply. Until then, I'll be daydreaming about our next encounter.\\n\\n\"\n \"Take care, and talk to you soon.\\n\\n\"\n \"Yours truly,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Thank You for Letting Me Use Your Computer\",\n \"body\": (\n \"Hey Amy,\\n\\n\"\n \"No problem at all! I'm always here to help out when I can. It's what teammates do, right?\\n\\n\"\n \"Oh, and about the password thing – haha, I know it's not the most secure choice. I've been meaning to change it, but I guess old habits die hard, right? \"\n \"Thanks for looking out for me though! I'll try to come up with something a bit more creative next time.\\n\\n\"\n \"If you ever need anything else, just give me a shout. Happy to help!\\n\\n\"\n \"Take care,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Professional Development\",\n \"body\": (\n \"Good Evening Katie,\\n\\n\"\n \"I hope this email finds you well. I'm reaching out to express my interest in professional development opportunities within the company, particularly in the area of management and leadership.\\n\\n\"\n \"I've been with the company for several years now, and I've had the chance to work on various projects and collaborate with different teams. I'm keen to build on this experience and take on more responsibility, and I believe that acquiring the necessary skills for a management role would be a great next step in my career.\\n\\n\"\n \"Could you please provide information on available training programs, workshops, or seminars that focus on leadership development and management skills? 
I'm particularly interested in areas such as team leadership, strategic planning, conflict resolution, and decision-making.\\n\\n\"\n \"Additionally, if there are any tuition reimbursement programs or resources for management training and certification, I'd like to learn more about them. I'm committed to investing time and effort in my professional growth and believe that these opportunities would greatly benefit both myself and the company.\\n\\n\"\n \"Your guidance and assistance in exploring these options would be greatly appreciated. I look forward to your response and any recommendations you may have.\\n\\n\"\n \"Thank you for your support, and I'm excited about the prospect of contributing to the company's success in a management role.\\n\\n\"\n \"Best regards,\\n\"\n \"Billy\"\n )\n }\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" }, { "identifier": "camera_first", "path": "systems/level_1/cameras/camera_1.py", "snippet": "def camera_first():\n print(camera_1)\n print()\n print()\n move = input(Fore.GREEN + \"> \" + Style.RESET_ALL)\n\n if move.lower() == \"forward\":\n clear_terminal()\n camera_second()\n elif move.lower() == \"back\":\n print(Fore.RED + \"There is nothing to go back to...\" + Style.RESET_ALL)\n time.sleep(2)\n clear_terminal()\n camera_first()" }, { "identifier": "MarkusSystem", "path": "systems/level_1/markus/markus_system.py", "snippet": "class MarkusSystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"system_log.txt\",\n \"content\": (\n \"Enigma Corps System Log\\n\\n\"\n \"Date: 2023-11-16 08:00 AM\\n\"\n \"Event Type: System Startup\\n\"\n \"Description: The Enigma Corps systems smoothly initiated startup procedures, ensuring a seamless beginning to the workday.\\n\\n\"\n \"Date: 2023-11-16 10:30 AM\\n\"\n \"Event Type: Network Upgrade\\n\"\n \"Description: Implemented a network upgrade to enhance data transfer speeds, providing improved efficiency across departments.\\n\\n\"\n \"Date: 2023-11-16 01:45 PM\\n\"\n \"Event Type: Security Patch Applied\\n\"\n \"Description: Critical security patch successfully applied to safeguard against potential vulnerabilities, ensuring system integrity.\\n\\n\"\n \"Date: 2023-11-16 04:20 PM\\n\"\n \"Event Type: Server Maintenance\\n\"\n \"Description: Conducted routine maintenance on Enigma Corps servers, optimizing performance and minimizing downtime.\\n\\n\"\n \"This dynamic system log captures key events, from the smooth startup of the day to network upgrades, security enhancements, and routine maintenance. 
It serves as a valuable record for troubleshooting and analysis, ensuring the optimal functionality of Enigma Corps systems.\"\n )\n },\n {\n \"name\": \"technical_documentation.docx\",\n \"content\": (\n \"Enigma Corps System Technical Documentation\\n\\n\"\n \"1. System Architecture:\\n\"\n \" - Overview of the system's structural design and components.\\n\\n\"\n \"2. Network Configuration:\\n\"\n \" - Details on the configuration of Enigma Corps' network setup for efficient communication.\\n\\n\"\n \"3. Security Protocols:\\n\"\n \" - Comprehensive overview of security measures and protocols implemented to safeguard sensitive data.\\n\\n\"\n \"4. Troubleshooting Guide:\\n\"\n \" - Step-by-step guide for identifying and resolving common issues to ensure seamless system functionality.\\n\\n\"\n \"5. Software Installation Procedures:\\n\"\n \" - Instructions for installing and updating software components within the Enigma Corps system.\\n\\n\"\n \"6. Hardware Specifications:\\n\"\n \" - Detailed specifications of the hardware components utilized in the Enigma Corps infrastructure.\\n\\n\"\n \"This meticulously crafted technical documentation serves as a go-to resource for understanding the Enigma Corps system, covering everything from its architecture and network configuration to security protocols, troubleshooting, and hardware specifications. It's an invaluable reference for maintaining optimal system performance.\"\n )\n },\n {\n \"name\": \"passwords.txt\",\n \"content\": (\n \"Sensitive Password Information for Enigma Corps\\n\\n\"\n \"Admin Password: *********\\n\"\n \"Database Password: *********\\n\"\n \"Router Password: *********\\n\"\n \"WiFi Password: *********\\n\"\n \"Encryption Key: *********\\n\\n\"\n \"Warning: This file contains confidential information. Keep it secure, and refrain from sharing passwords without explicit authorization. Safeguarding this information is crucial to maintaining the security and integrity of the Enigma Corps systems.\"\n )\n },\n {\n \"name\": \"software_inventory.csv\",\n \"content\": (\n \"Software Inventory for Enigma Corps\\n\\n\"\n \"Software Name, Version, License Key\\n\"\n \"1. Enigma Security Suite, v2.0, X1Y2Z3A4-B5C6D7E8-F9G0H1I2\\n\"\n \"2. DataGuard Backup, v1.5, Y3X2W1V0-U9T8S7R6-Q5P4O3N2\\n\"\n \"3. Office Suite, v2022, Z9Z8Z7Z6-Z5Z4Z3Z2-Z1Z0Z9Z8-Z7Z6Z5\\n\"\n \"4. VPN Client, v3.1, W6W5W4W3-W2W1W0-W9W8W7-W6W5W4\\n\"\n \"5. Project Management Tool, v4.2, VV8V7V6V5-V4V3V2V1-V0V9V8V7-V6V5V4\\n\\n\"\n \"Important: This inventory is crucial for tracking and managing software across Enigma Corps systems. The provided license keys are randomized for security reasons. Handle this information responsibly, and ensure it is only accessible to authorized personnel to maintain the security and compliance of our software assets.\"\n )\n }\n ]\n self.emails = [\n # Email to Management\n {\n \"sender\": \"Markus\",\n \"subject\": \"System Maintenance Scheduled\",\n \"body\": (\n \"Dear Michael,\\n\\n\"\n \"I hope this email finds you well. We wanted to inform you that we have scheduled a system maintenance session for the upcoming weekend to ensure the optimal performance and security of our systems.\\n\\n\"\n \"Maintenance Details:\\n\"\n \"- Date: 16/12/23 - 17/12/23\\n\"\n \"- Time: 3:00pm\\n\"\n \"- Duration: 1 Hour\\n\"\n \"- Impact: No impact expected\\n\\n\"\n \"During this period, there might be temporary disruptions in certain services. Our team will be working diligently to minimize any inconvenience. 
If you have any concerns or specific considerations, please feel free to reach out to us.\\n\\n\"\n \"Thank you for your understanding and cooperation.\\n\\n\"\n \"Best regards,\\n\"\n \"IT Department\"\n )\n },\n {\n # Email to Employees\n \"sender\": \"Markus\",\n \"subject\": \"Upcoming Software Update\",\n \"body\": (\n \"Good afternoon, Kyle,\\n\\n\"\n \"We hope you're doing well. Our IT team is excited to inform you about an upcoming software update that will enhance the functionality and security of our systems. The update is scheduled for [Date] at [Time]. Please take note of the following details:\\n\\n\"\n \"- Expected Duration: Two Days\\n\"\n \"- Action Required: As this will be processed during the weekend, no action is required.\\n\"\n \"- Impact: While we anticipate minimal impact on your day-to-day activities, it's essential to be aware of any potential changes. These include: New UI to navigate, logging in or logging out issues.\\n\\n\"\n \"We recommend saving your work and logging out of your system before the update. If you encounter any issues post-update, don't hesitate to contact our IT support team for assistance.\\n\\n\"\n \"Thank you for your cooperation and understanding.\\n\\n\"\n \"Best regards,\\n\"\n \"IT Support Team\"\n )\n },\n # Email from Markus to Billy\n {\n \"sender\": \"Markus\",\n \"subject\": \"Urgent: Password Security Update Required\",\n \"body\": (\n \"Billy,\\n\\n\"\n \"I hope this email finds you well. I wanted to bring to your attention the importance of updating your current password. This is not the first time I've raised this concern, and I want to emphasize its critical nature.\\n\\n\"\n \"In recent security assessments, it has been flagged that your current password might not meet the latest security standards. To ensure the safety of your account and our overall cybersecurity, it is imperative that you change your password promptly.\\n\\n\"\n \"I understand that these reminders may seem repetitive, but they stem from a genuine concern for the security of your account and our collective responsibility in maintaining a robust cybersecurity posture.\\n\\n\"\n \"Please take a moment at your earliest convenience to update your password. If you encounter any issues or have questions, feel free to reach out. Your cooperation is greatly appreciated.\\n\\n\"\n \"Best regards,\\n\"\n \"Markus, Security Team\"\n )\n }\n\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" } ]
import msvcrt import os import pickle import sys import time import colorama import pygame from colorama import Fore, Style from components.common_functions import clear_terminal, print_slow, shop_help, help_user, connect_help, mail_help, \ system_help from conversations.calls import intro_call, first_call, second_call, third_call, fourth_call, fifth_call, sixth_call, \ markus_seen_call from conversations.minigame_calls import code_shatter_call from minigames.code_shatter_minigame import code_shatter_minigame from minigames.eye_spy_minigame import port_scanning from systems.level_1.amy.amy_system import AmySystem from systems.level_1.billy.billy_system import BillySystem from systems.level_1.cameras.camera_1 import camera_first from systems.level_1.markus.markus_system import MarkusSystem
13717
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = []
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = []
amy_system = AmySystem()
18
2023-11-06 09:52:13+00:00
16k
Codra-Ingenierie-Informatique/DataLab
cdl/core/gui/objectview.py
[ { "identifier": "_", "path": "cdl/config.py", "snippet": "CONF_VERSION = \"1.0.0\"\nAPP_NAME = \"DataLab\"\nMOD_NAME = \"cdl\"\nAPP_DESC = _(\"\"\"DataLab is a generic signal and image processing platform\"\"\")\nAPP_PATH = osp.dirname(__file__)\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\", \"true\")\nTEST_SEGFAULT_ERROR = len(os.environ.get(\"TEST_SEGFAULT_ERROR\", \"\")) > 0\nDATETIME_FORMAT = \"%d/%m/%Y - %H:%M:%S\"\nDATAPATH = configtools.get_module_data_path(MOD_NAME, \"data\")\nSHOTPATH = osp.join(\n configtools.get_module_data_path(MOD_NAME), os.pardir, \"doc\", \"images\", \"shots\"\n)\nOTHER_PLUGINS_PATHLIST = [configtools.get_module_data_path(MOD_NAME, \"plugins\")]\nIS_FROZEN = is_frozen(MOD_NAME)\nPLOTPY_DEFAULTS = {\n \"plot\": {\n # \"antialiasing\": False,\n # \"title/font/size\": 12,\n # \"title/font/bold\": False,\n # \"marker/curve/text/font/size\": 8,\n # \"marker/curve/text/font/family\": \"default\",\n # \"marker/curve/text/font/bold\": False,\n # \"marker/curve/text/font/italic\": False,\n \"marker/curve/text/textcolor\": \"black\",\n # \"marker/curve/text/background_color\": \"#ffffff\",\n # \"marker/curve/text/background_alpha\": 0.8,\n # \"marker/cross/text/font/family\": \"default\",\n # \"marker/cross/text/font/size\": 8,\n # \"marker/cross/text/font/bold\": False,\n # \"marker/cross/text/font/italic\": False,\n \"marker/cross/text/textcolor\": \"black\",\n # \"marker/cross/text/background_color\": \"#ffffff\",\n \"marker/cross/text/background_alpha\": 0.7,\n # \"marker/cross/line/style\": \"DashLine\",\n # \"marker/cross/line/color\": \"yellow\",\n # \"marker/cross/line/width\": 1,\n # \"marker/cursor/text/font/size\": 8,\n # \"marker/cursor/text/font/family\": \"default\",\n # \"marker/cursor/text/font/bold\": False,\n # \"marker/cursor/text/font/italic\": False,\n # \"marker/cursor/text/textcolor\": \"#ff9393\",\n # \"marker/cursor/text/background_color\": \"#ffffff\",\n # \"marker/cursor/text/background_alpha\": 0.8,\n \"shape/drag/symbol/marker\": \"NoSymbol\",\n \"shape/mask/symbol/size\": 5,\n \"shape/mask/sel_symbol/size\": 8,\n # -----------------------------------------------------------------------------\n # Annotated shape style for annotations:\n \"shape/annotation/line/style\": \"SolidLine\",\n \"shape/annotation/line/color\": \"#ffff00\",\n \"shape/annotation/line/width\": 1,\n \"shape/annotation/fill/style\": \"SolidPattern\",\n \"shape/annotation/fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/fill/alpha\": 0.1,\n \"shape/annotation/symbol/marker\": \"Rect\",\n \"shape/annotation/symbol/size\": 3,\n \"shape/annotation/symbol/edgecolor\": \"#ffff00\",\n \"shape/annotation/symbol/facecolor\": \"#ffff00\",\n \"shape/annotation/symbol/alpha\": 1.0,\n \"shape/annotation/sel_line/style\": \"SolidLine\",\n \"shape/annotation/sel_line/color\": \"#00ff00\",\n \"shape/annotation/sel_line/width\": 1,\n \"shape/annotation/sel_fill/style\": \"SolidPattern\",\n \"shape/annotation/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/sel_fill/alpha\": 0.1,\n \"shape/annotation/sel_symbol/marker\": \"Rect\",\n \"shape/annotation/sel_symbol/size\": 9,\n \"shape/annotation/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/annotation/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/annotation/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / signals:\n \"shape/result/s/line/style\": \"SolidLine\",\n \"shape/result/s/line/color\": MAIN_FG_COLOR,\n 
\"shape/result/s/line/width\": 1,\n \"shape/result/s/fill/style\": \"SolidPattern\",\n \"shape/result/s/fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/fill/alpha\": 0.1,\n \"shape/result/s/symbol/marker\": \"XCross\",\n \"shape/result/s/symbol/size\": 7,\n \"shape/result/s/symbol/edgecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/facecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/alpha\": 1.0,\n \"shape/result/s/sel_line/style\": \"SolidLine\",\n \"shape/result/s/sel_line/color\": \"#00ff00\",\n \"shape/result/s/sel_line/width\": 1,\n \"shape/result/s/sel_fill/style\": \"SolidPattern\",\n \"shape/result/s/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/sel_fill/alpha\": 0.1,\n \"shape/result/s/sel_symbol/marker\": \"Rect\",\n \"shape/result/s/sel_symbol/size\": 9,\n \"shape/result/s/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/s/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/s/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / images:\n \"shape/result/i/line/style\": \"SolidLine\",\n \"shape/result/i/line/color\": \"#ffff00\",\n \"shape/result/i/line/width\": 1,\n \"shape/result/i/fill/style\": \"SolidPattern\",\n \"shape/result/i/fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/fill/alpha\": 0.1,\n \"shape/result/i/symbol/marker\": \"Rect\",\n \"shape/result/i/symbol/size\": 3,\n \"shape/result/i/symbol/edgecolor\": \"#ffff00\",\n \"shape/result/i/symbol/facecolor\": \"#ffff00\",\n \"shape/result/i/symbol/alpha\": 1.0,\n \"shape/result/i/sel_line/style\": \"SolidLine\",\n \"shape/result/i/sel_line/color\": \"#00ff00\",\n \"shape/result/i/sel_line/width\": 1,\n \"shape/result/i/sel_fill/style\": \"SolidPattern\",\n \"shape/result/i/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/sel_fill/alpha\": 0.1,\n \"shape/result/i/sel_symbol/marker\": \"Rect\",\n \"shape/result/i/sel_symbol/size\": 9,\n \"shape/result/i/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/i/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/i/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n },\n}\ndef is_frozen(module_name: str) -> bool:\ndef get_mod_source_dir() -> str | None:\n def get_def_dict(cls, category: str) -> dict:\n def set_def_dict(cls, category: str, def_dict: dict) -> None:\ndef get_old_log_fname(fname):\ndef initialize():\ndef reset():\nclass MainSection(conf.Section, metaclass=conf.SectionMeta):\nclass ConsoleSection(conf.Section, metaclass=conf.SectionMeta):\nclass IOSection(conf.Section, metaclass=conf.SectionMeta):\nclass ProcSection(conf.Section, metaclass=conf.SectionMeta):\nclass ViewSection(conf.Section, metaclass=conf.SectionMeta):\nclass Conf(conf.Configuration, metaclass=conf.ConfMeta):" }, { "identifier": "ObjectGroup", "path": "cdl/core/gui/objectmodel.py", "snippet": "class ObjectGroup:\n \"\"\"Represents a DataLab object group\"\"\"\n\n PREFIX = \"g\"\n\n def __init__(self, title: str, model: ObjectModel) -> None:\n self.model = model\n self.uuid: str = str(uuid4()) # Group uuid\n self.__objects: list[str] = [] # list of object uuids\n self.__title: str = title\n self.__gnb = 0\n\n @property\n def number(self) -> int:\n \"\"\"Return group number (used for short ID)\"\"\"\n return self.__gnb\n\n @number.setter\n def number(self, gnb: int):\n \"\"\"Set group number (used for short ID)\"\"\"\n self.__gnb = gnb\n\n @property\n def short_id(self):\n \"\"\"Short group ID\"\"\"\n return 
f\"{self.PREFIX}{self.__gnb:03d}\"\n\n @property\n def title(self) -> str:\n \"\"\"Return group title\"\"\"\n return self.__title\n\n @title.setter\n def title(self, title: str) -> None:\n \"\"\"Set group title\"\"\"\n self.__title = title\n\n def __iter__(self) -> Iterator[SignalObj | ImageObj]:\n \"\"\"Iterate over objects in group\"\"\"\n return iter(self.model.get_objects(self.__objects))\n\n def __len__(self) -> int:\n \"\"\"Return number of objects in group\"\"\"\n return len(self.__objects)\n\n def __getitem__(self, index: int) -> SignalObj | ImageObj:\n \"\"\"Return object at index\"\"\"\n return self.model[self.__objects[index]]\n\n def __contains__(self, obj: SignalObj | ImageObj) -> bool:\n \"\"\"Return True if obj is in group\"\"\"\n return obj.uuid in self.__objects\n\n def append(self, obj: SignalObj | ImageObj) -> None:\n \"\"\"Append object to group\"\"\"\n self.__objects.append(obj.uuid)\n\n def insert(self, index: int, obj: SignalObj | ImageObj) -> None:\n \"\"\"Insert object at index\"\"\"\n fix_titles(self.model.get_all_objects(), obj, \"add\")\n self.__objects.insert(index, obj.uuid)\n\n def remove(self, obj: SignalObj | ImageObj) -> None:\n \"\"\"Remove object from group\"\"\"\n fix_titles(self.model.get_all_objects(), obj, \"remove\")\n self.__objects.remove(obj.uuid)\n\n def clear(self) -> None:\n \"\"\"Clear group\"\"\"\n self.__objects.clear()\n\n def get_objects(self) -> list[SignalObj | ImageObj]:\n \"\"\"Return objects in group\"\"\"\n return self.model.get_objects(self.__objects)\n\n def get_object_ids(self) -> list[str]:\n \"\"\"Return object ids in group\"\"\"\n return self.__objects" }, { "identifier": "ImageObj", "path": "cdl/core/model/image.py", "snippet": "class ImageObj(gds.DataSet, base.BaseObj):\n \"\"\"Image object\"\"\"\n\n PREFIX = \"i\"\n CONF_FMT = Conf.view.ima_format\n DEFAULT_FMT = \".1f\"\n VALID_DTYPES = (\n np.uint8,\n np.uint16,\n np.int16,\n np.int32,\n np.float32,\n np.float64,\n np.complex128,\n )\n\n def __init__(self, title=None, comment=None, icon=\"\"):\n \"\"\"Constructor\n\n Args:\n title (str): title\n comment (str): comment\n icon (str): icon\n \"\"\"\n gds.DataSet.__init__(self, title, comment, icon)\n base.BaseObj.__init__(self)\n self.regenerate_uuid()\n self._dicom_template = None\n self._maskdata_cache = None\n\n def regenerate_uuid(self):\n \"\"\"Regenerate UUID\n\n This method is used to regenerate UUID after loading the object from a file.\n This is required to avoid UUID conflicts when loading objects from file\n without clearing the workspace first.\n \"\"\"\n self.uuid = str(uuid4())\n\n @property\n def size(self) -> tuple[int, int]:\n \"\"\"Returns (width, height)\"\"\"\n return self.data.shape[1], self.data.shape[0]\n\n def __add_metadata(self, key: str, value: Any) -> None:\n \"\"\"Add value to metadata if value can be converted into builtin/NumPy type\n\n Args:\n key (str): key\n value (Any): value\n \"\"\"\n stored_val = to_builtin(value)\n if stored_val is not None:\n self.metadata[key] = stored_val\n\n def set_metadata_from(self, obj: Mapping | dict) -> None:\n \"\"\"Set metadata from object: dict-like (only string keys are considered)\n or any other object (iterating over supported attributes)\n\n Args:\n obj (Mapping | dict): object\n \"\"\"\n self.reset_metadata_to_defaults()\n ptn = r\"__[\\S_]*__$\"\n if isinstance(obj, Mapping):\n for key, value in obj.items():\n if isinstance(key, str) and not re.match(ptn, key):\n self.__add_metadata(key, value)\n else:\n for attrname in dir(obj):\n if attrname != 
\"GroupLength\" and not re.match(ptn, attrname):\n try:\n attr = getattr(obj, attrname)\n if not callable(attr) and attr:\n self.__add_metadata(attrname, attr)\n except AttributeError:\n pass\n\n @property\n def dicom_template(self):\n \"\"\"Get DICOM template\"\"\"\n return self._dicom_template\n\n @dicom_template.setter\n def dicom_template(self, template):\n \"\"\"Set DICOM template\"\"\"\n if template is not None:\n ipp = getattr(template, \"ImagePositionPatient\", None)\n if ipp is not None:\n self.x0, self.y0 = float(ipp[0]), float(ipp[1])\n pxs = getattr(template, \"PixelSpacing\", None)\n if pxs is not None:\n self.dy, self.dx = float(pxs[0]), float(pxs[1])\n self.set_metadata_from(template)\n self._dicom_template = template\n\n uuid = gds.StringItem(\"UUID\").set_prop(\"display\", hide=True)\n\n _tabs = gds.BeginTabGroup(\"all\")\n\n _datag = gds.BeginGroup(_(\"Data\"))\n data = gds.FloatArrayItem(_(\"Data\"))\n metadata = gds.DictItem(_(\"Metadata\"), default={})\n _e_datag = gds.EndGroup(_(\"Data\"))\n\n _dxdyg = gds.BeginGroup(f'{_(\"Origin\")} / {_(\"Pixel spacing\")}')\n _origin = gds.BeginGroup(_(\"Origin\"))\n x0 = gds.FloatItem(\"X<sub>0</sub>\", default=0.0)\n y0 = gds.FloatItem(\"Y<sub>0</sub>\", default=0.0).set_pos(col=1)\n _e_origin = gds.EndGroup(_(\"Origin\"))\n _pixel_spacing = gds.BeginGroup(_(\"Pixel spacing\"))\n dx = gds.FloatItem(\"Δx\", default=1.0, nonzero=True)\n dy = gds.FloatItem(\"Δy\", default=1.0, nonzero=True).set_pos(col=1)\n _e_pixel_spacing = gds.EndGroup(_(\"Pixel spacing\"))\n _e_dxdyg = gds.EndGroup(f'{_(\"Origin\")} / {_(\"Pixel spacing\")}')\n\n _unitsg = gds.BeginGroup(f'{_(\"Titles\")} / {_(\"Units\")}')\n title = gds.StringItem(_(\"Image title\"), default=_(\"Untitled\"))\n _tabs_u = gds.BeginTabGroup(\"units\")\n _unitsx = gds.BeginGroup(_(\"X-axis\"))\n xlabel = gds.StringItem(_(\"Title\"), default=\"\")\n xunit = gds.StringItem(_(\"Unit\"), default=\"\")\n _e_unitsx = gds.EndGroup(_(\"X-axis\"))\n _unitsy = gds.BeginGroup(_(\"Y-axis\"))\n ylabel = gds.StringItem(_(\"Title\"), default=\"\")\n yunit = gds.StringItem(_(\"Unit\"), default=\"\")\n _e_unitsy = gds.EndGroup(_(\"Y-axis\"))\n _unitsz = gds.BeginGroup(_(\"Z-axis\"))\n zlabel = gds.StringItem(_(\"Title\"), default=\"\")\n zunit = gds.StringItem(_(\"Unit\"), default=\"\")\n _e_unitsz = gds.EndGroup(_(\"Z-axis\"))\n _e_tabs_u = gds.EndTabGroup(\"units\")\n _e_unitsg = gds.EndGroup(f'{_(\"Titles\")} / {_(\"Units\")}')\n\n _e_tabs = gds.EndTabGroup(\"all\")\n\n @property\n def xc(self) -> float:\n \"\"\"Return image center X-axis coordinate\"\"\"\n return self.x0 + 0.5 * self.data.shape[1] * self.dx\n\n @property\n def yc(self) -> float:\n \"\"\"Return image center Y-axis coordinate\"\"\"\n return self.y0 + 0.5 * self.data.shape[0] * self.dy\n\n def get_data(self, roi_index: int | None = None) -> np.ndarray:\n \"\"\"\n Return original data (if ROI is not defined or `roi_index` is None),\n or ROI data (if both ROI and `roi_index` are defined).\n\n Args:\n roi_index (int): ROI index\n\n Returns:\n numpy.ndarray: masked data\n \"\"\"\n if self.roi is None or roi_index is None:\n return self.data\n roidataitem = RoiDataItem(self.roi[roi_index])\n return roidataitem.get_masked_view(self.data, self.maskdata)\n\n def copy(self, title: str | None = None, dtype: np.dtype | None = None) -> ImageObj:\n \"\"\"Copy object.\n\n Args:\n title (str): title\n dtype (numpy.dtype): data type\n\n Returns:\n ImageObj: copied object\n \"\"\"\n title = self.title if title is None else title\n obj = 
ImageObj(title=title)\n obj.title = title\n obj.x0 = self.x0\n obj.y0 = self.y0\n obj.dx = self.dx\n obj.dy = self.dy\n obj.metadata = deepcopy(self.metadata)\n obj.data = np.array(self.data, copy=True, dtype=dtype)\n obj.dicom_template = self.dicom_template\n return obj\n\n def set_data_type(self, dtype: np.dtype) -> None:\n \"\"\"Change data type.\n\n Args:\n dtype (numpy.dtype): data type\n \"\"\"\n self.data = np.array(self.data, dtype=dtype)\n\n def __viewable_data(self) -> np.ndarray:\n \"\"\"Return viewable data\"\"\"\n data = self.data.real\n if np.any(np.isnan(data)):\n data = np.nan_to_num(data, posinf=0, neginf=0)\n return data\n\n def update_plot_item_parameters(self, item: MaskedImageItem) -> None:\n \"\"\"Update plot item parameters from object data/metadata\n\n Takes into account a subset of plot item parameters. Those parameters may\n have been overriden by object metadata entries or other object data. The goal\n is to update the plot item accordingly.\n\n This is *almost* the inverse operation of `update_metadata_from_plot_item`.\n\n Args:\n item: plot item\n \"\"\"\n for axis in (\"x\", \"y\", \"z\"):\n unit = getattr(self, axis + \"unit\")\n fmt = r\"%.1f\"\n if unit:\n fmt = r\"%.1f (\" + unit + \")\"\n setattr(item.param, axis + \"format\", fmt)\n # Updating origin and pixel spacing\n has_origin = self.x0 is not None and self.y0 is not None\n has_pixelspacing = self.dx is not None and self.dy is not None\n if has_origin or has_pixelspacing:\n x0, y0, dx, dy = 0.0, 0.0, 1.0, 1.0\n if has_origin:\n x0, y0 = self.x0, self.y0\n if has_pixelspacing:\n dx, dy = self.dx, self.dy\n shape = self.data.shape\n item.param.xmin, item.param.xmax = x0, x0 + dx * shape[1]\n item.param.ymin, item.param.ymax = y0, y0 + dy * shape[0]\n lut_range = self.metadata.get(\"lut_range\")\n if lut_range is not None:\n item.set_lut_range(lut_range)\n super().update_plot_item_parameters(item)\n\n def update_metadata_from_plot_item(self, item: MaskedImageItem) -> None:\n \"\"\"Update metadata from plot item.\n\n Takes into account a subset of plot item parameters. Those parameters may\n have been modified by the user through the plot item GUI. 
The goal is to\n update the metadata accordingly.\n\n This is *almost* the inverse operation of `update_plot_item_parameters`.\n\n Args:\n item: plot item\n \"\"\"\n super().update_metadata_from_plot_item(item)\n # Storing the LUT range in metadata:\n lut_range = list(item.get_lut_range())\n self.metadata[\"lut_range\"] = lut_range\n\n def make_item(self, update_from: MaskedImageItem | None = None) -> MaskedImageItem:\n \"\"\"Make plot item from data.\n\n Args:\n update_from (MaskedImageItem | None): update from plot item\n\n Returns:\n MaskedImageItem: plot item\n \"\"\"\n data = self.__viewable_data()\n item = make.maskedimage(\n data,\n self.maskdata,\n title=self.title,\n colormap=\"jet\",\n eliminate_outliers=Conf.view.ima_eliminate_outliers.get(),\n interpolation=\"nearest\",\n show_mask=True,\n )\n if update_from is None:\n self.update_plot_item_parameters(item)\n else:\n update_dataset(item.param, update_from.param)\n item.param.update_item(item)\n return item\n\n def update_item(self, item: MaskedImageItem, data_changed: bool = True) -> None:\n \"\"\"Update plot item from data.\n\n Args:\n item (MaskedImageItem): plot item\n data_changed (bool): if True, data has changed\n \"\"\"\n if data_changed:\n item.set_data(self.__viewable_data(), lut_range=[item.min, item.max])\n item.set_mask(self.maskdata)\n item.param.label = self.title\n self.update_plot_item_parameters(item)\n item.plot().update_colormap_axis(item)\n\n def get_roi_param(self, title, *defaults) -> gds.DataSet:\n \"\"\"Return ROI parameters dataset.\n\n Args:\n title (str): title\n *defaults: default values\n \"\"\"\n roidataitem = RoiDataItem(defaults)\n xd0, yd0, xd1, yd1 = defaults\n if roidataitem.geometry is RoiDataGeometries.RECTANGLE:\n param = RectangleROIParam(title)\n param.x0 = xd0\n param.y0 = yd0\n param.x1 = xd1\n param.y1 = yd1\n else:\n param = CircularROIParam(title)\n param.xc = int(0.5 * (xd0 + xd1))\n param.yc = yd0\n param.r = int(0.5 * (xd1 - xd0))\n return param\n\n @staticmethod\n def params_to_roidata(params: gds.DataSetGroup) -> np.ndarray | None:\n \"\"\"Convert ROI dataset group to ROI array data.\n\n Args:\n params (DataSetGroup): ROI dataset group\n\n Returns:\n numpy.ndarray: ROI array data\n \"\"\"\n roilist = []\n for roiparam in params.datasets:\n roiparam: RectangleROIParam | CircularROIParam\n roilist.append(roiparam.get_coords())\n if len(roilist) == 0:\n return None\n return np.array(roilist, int)\n\n def new_roi_item(\n self, fmt: str, lbl: bool, editable: bool, geometry: RoiDataGeometries\n ) -> MaskedImageItem:\n \"\"\"Return a new ROI item from scratch\n\n Args:\n fmt (str): format string\n lbl (bool): if True, add label\n editable (bool): if True, ROI is editable\n geometry (RoiDataGeometries): ROI geometry\n \"\"\"\n roidataitem = RoiDataItem.from_image(self, geometry)\n return roidataitem.make_roi_item(None, fmt, lbl, editable)\n\n def roi_coords_to_indexes(self, coords: list) -> np.ndarray:\n \"\"\"Convert ROI coordinates to indexes.\n\n Args:\n coords (list): coordinates\n\n Returns:\n numpy.ndarray: indexes\n \"\"\"\n indexes = np.array(coords)\n if indexes.size > 0:\n indexes[:, ::2] -= self.x0 + 0.5 * self.dx\n indexes[:, ::2] /= self.dx\n indexes[:, 1::2] -= self.y0 + 0.5 * self.dy\n indexes[:, 1::2] /= self.dy\n return np.array(indexes, int)\n\n def iterate_roi_items(self, fmt: str, lbl: bool, editable: bool = True) -> Iterator:\n \"\"\"Make plot item representing a Region of Interest.\n\n Args:\n fmt (str): format string\n lbl (bool): if True, add label\n editable 
(bool): if True, ROI is editable\n\n Yields:\n PlotItem: plot item\n \"\"\"\n if self.roi is not None:\n roicoords = np.array(self.roi, float)\n roicoords[:, ::2] *= self.dx\n roicoords[:, ::2] += self.x0 - 0.5 * self.dx\n roicoords[:, 1::2] *= self.dy\n roicoords[:, 1::2] += self.y0 - 0.5 * self.dy\n for index, coords in enumerate(roicoords):\n roidataitem = RoiDataItem(coords)\n yield roidataitem.make_roi_item(index, fmt, lbl, editable)\n\n @property\n def maskdata(self) -> np.ndarray:\n \"\"\"Return masked data (areas outside defined regions of interest)\n\n Returns:\n numpy.ndarray: masked data\n \"\"\"\n roi_changed = self.roi_has_changed()\n if self.roi is None:\n if roi_changed:\n self._maskdata_cache = None\n elif roi_changed or self._maskdata_cache is None:\n mask = np.ones_like(self.data, dtype=bool)\n for roirow in self.roi:\n roidataitem = RoiDataItem(roirow)\n roi_mask = roidataitem.apply_mask(self.data, yxratio=self.dy / self.dx)\n mask &= roi_mask\n self._maskdata_cache = mask\n return self._maskdata_cache\n\n def invalidate_maskdata_cache(self) -> None:\n \"\"\"Invalidate mask data cache: force to rebuild it\"\"\"\n self._maskdata_cache = None\n\n def add_label_with_title(self, title: str | None = None) -> None:\n \"\"\"Add label with title annotation\n\n Args:\n title (str): title (if None, use image title)\n \"\"\"\n title = self.title if title is None else title\n if title:\n label = make.label(title, (self.x0, self.y0), (10, 10), \"TL\")\n self.add_annotations_from_items([label])" }, { "identifier": "SignalObj", "path": "cdl/core/model/signal.py", "snippet": "class SignalObj(gds.DataSet, base.BaseObj):\n \"\"\"Signal object\"\"\"\n\n PREFIX = \"s\"\n CONF_FMT = Conf.view.sig_format\n DEFAULT_FMT = \"g\"\n VALID_DTYPES = (np.float32, np.float64, np.complex128)\n\n uuid = gds.StringItem(\"UUID\").set_prop(\"display\", hide=True)\n\n _tabs = gds.BeginTabGroup(\"all\")\n\n _datag = gds.BeginGroup(_(\"Data and metadata\"))\n title = gds.StringItem(_(\"Signal title\"), default=_(\"Untitled\"))\n xydata = gds.FloatArrayItem(_(\"Data\"), transpose=True, minmax=\"rows\")\n metadata = gds.DictItem(_(\"Metadata\"), default={})\n _e_datag = gds.EndGroup(_(\"Data and metadata\"))\n\n _unitsg = gds.BeginGroup(_(\"Titles and units\"))\n title = gds.StringItem(_(\"Signal title\"), default=_(\"Untitled\"))\n _tabs_u = gds.BeginTabGroup(\"units\")\n _unitsx = gds.BeginGroup(_(\"X-axis\"))\n xlabel = gds.StringItem(_(\"Title\"), default=\"\")\n xunit = gds.StringItem(_(\"Unit\"), default=\"\")\n _e_unitsx = gds.EndGroup(_(\"X-axis\"))\n _unitsy = gds.BeginGroup(_(\"Y-axis\"))\n ylabel = gds.StringItem(_(\"Title\"), default=\"\")\n yunit = gds.StringItem(_(\"Unit\"), default=\"\")\n _e_unitsy = gds.EndGroup(_(\"Y-axis\"))\n _e_tabs_u = gds.EndTabGroup(\"units\")\n _e_unitsg = gds.EndGroup(_(\"Titles and units\"))\n\n _e_tabs = gds.EndTabGroup(\"all\")\n\n def __init__(self, title=None, comment=None, icon=\"\"):\n \"\"\"Constructor\n\n Args:\n title (str): title\n comment (str): comment\n icon (str): icon\n \"\"\"\n gds.DataSet.__init__(self, title, comment, icon)\n base.BaseObj.__init__(self)\n self.regenerate_uuid()\n\n def regenerate_uuid(self):\n \"\"\"Regenerate UUID\n\n This method is used to regenerate UUID after loading the object from a file.\n This is required to avoid UUID conflicts when loading objects from file\n without clearing the workspace first.\n \"\"\"\n self.uuid = str(uuid4())\n\n def copy(\n self, title: str | None = None, dtype: np.dtype | None = None\n ) -> 
SignalObj:\n \"\"\"Copy object.\n\n Args:\n title (str): title\n dtype (numpy.dtype): data type\n\n Returns:\n SignalObj: copied object\n \"\"\"\n title = self.title if title is None else title\n obj = SignalObj(title=title)\n obj.title = title\n if dtype not in (None, float, complex, np.complex128):\n raise RuntimeError(\"Signal data only supports float64/complex128 dtype\")\n obj.metadata = deepcopy(self.metadata)\n obj.xydata = np.array(self.xydata, copy=True, dtype=dtype)\n return obj\n\n def set_data_type(self, dtype: np.dtype) -> None: # pylint: disable=unused-argument\n \"\"\"Change data type.\n\n Args:\n dtype (numpy.dtype): data type\n \"\"\"\n raise RuntimeError(\"Setting data type is not support for signals\")\n\n def set_xydata(\n self,\n x: np.ndarray | list,\n y: np.ndarray | list,\n dx: np.ndarray | list | None = None,\n dy: np.ndarray | list | None = None,\n ) -> None:\n \"\"\"Set xy data\n\n Args:\n x (numpy.ndarray): x data\n y (numpy.ndarray): y data\n dx (numpy.ndarray): dx data (optional: error bars)\n dy (numpy.ndarray): dy data (optional: error bars)\n \"\"\"\n if x is not None:\n x = np.array(x)\n if y is not None:\n y = np.array(y)\n if dx is not None:\n dx = np.array(dx)\n if dy is not None:\n dy = np.array(dy)\n if dx is None and dy is None:\n self.xydata = np.vstack([x, y])\n else:\n if dx is None:\n dx = np.zeros_like(dy)\n if dy is None:\n dy = np.zeros_like(dx)\n self.xydata = np.vstack((x, y, dx, dy))\n\n def __get_x(self) -> np.ndarray | None:\n \"\"\"Get x data\"\"\"\n if self.xydata is not None:\n return self.xydata[0]\n return None\n\n def __set_x(self, data) -> None:\n \"\"\"Set x data\"\"\"\n self.xydata[0] = np.array(data)\n\n def __get_y(self) -> np.ndarray | None:\n \"\"\"Get y data\"\"\"\n if self.xydata is not None:\n return self.xydata[1]\n return None\n\n def __set_y(self, data) -> None:\n \"\"\"Set y data\"\"\"\n self.xydata[1] = np.array(data)\n\n def __get_dx(self) -> np.ndarray | None:\n \"\"\"Get dx data\"\"\"\n if self.xydata is not None and len(self.xydata) > 2:\n return self.xydata[2]\n return None\n\n def __set_dx(self, data) -> None:\n \"\"\"Set dx data\"\"\"\n if self.xydata is not None and len(self.xydata) > 2:\n self.xydata[2] = np.array(data)\n else:\n raise ValueError(\"dx data not available\")\n\n def __get_dy(self) -> np.ndarray | None:\n \"\"\"Get dy data\"\"\"\n if self.xydata is not None and len(self.xydata) > 3:\n return self.xydata[3]\n return None\n\n def __set_dy(self, data) -> None:\n \"\"\"Set dy data\"\"\"\n if self.xydata is not None and len(self.xydata) > 3:\n self.xydata[3] = np.array(data)\n else:\n raise ValueError(\"dy data not available\")\n\n x = property(__get_x, __set_x)\n y = data = property(__get_y, __set_y)\n dx = property(__get_dx, __set_dx)\n dy = property(__get_dy, __set_dy)\n\n def get_data(self, roi_index: int | None = None) -> np.ndarray:\n \"\"\"\n Return original data (if ROI is not defined or `roi_index` is None),\n or ROI data (if both ROI and `roi_index` are defined).\n\n Args:\n roi_index (int): ROI index\n\n Returns:\n numpy.ndarray: data\n \"\"\"\n if self.roi is None or roi_index is None:\n return self.x, self.y\n i1, i2 = self.roi[roi_index, :]\n return self.x[i1:i2], self.y[i1:i2]\n\n def update_plot_item_parameters(self, item: CurveItem) -> None:\n \"\"\"Update plot item parameters from object data/metadata\n\n Takes into account a subset of plot item parameters. Those parameters may\n have been overriden by object metadata entries or other object data. 
The goal\n is to update the plot item accordingly.\n\n This is *almost* the inverse operation of `update_metadata_from_plot_item`.\n\n Args:\n item: plot item\n \"\"\"\n update_dataset(item.param.line, self.metadata)\n update_dataset(item.param.symbol, self.metadata)\n super().update_plot_item_parameters(item)\n\n def update_metadata_from_plot_item(self, item: CurveItem) -> None:\n \"\"\"Update metadata from plot item.\n\n Takes into account a subset of plot item parameters. Those parameters may\n have been modified by the user through the plot item GUI. The goal is to\n update the metadata accordingly.\n\n This is *almost* the inverse operation of `update_plot_item_parameters`.\n\n Args:\n item: plot item\n \"\"\"\n super().update_metadata_from_plot_item(item)\n restore_dataset(item.param.line, self.metadata)\n restore_dataset(item.param.symbol, self.metadata)\n\n def make_item(self, update_from: CurveItem = None) -> CurveItem:\n \"\"\"Make plot item from data.\n\n Args:\n update_from (CurveItem): plot item to update from\n\n Returns:\n CurveItem: plot item\n \"\"\"\n if len(self.xydata) in (2, 3, 4):\n if len(self.xydata) == 2: # x, y signal\n x, y = self.xydata\n item = make.mcurve(x.real, y.real, label=self.title)\n elif len(self.xydata) == 3: # x, y, dy error bar signal\n x, y, dy = self.xydata\n item = make.merror(x.real, y.real, dy.real, label=self.title)\n elif len(self.xydata) == 4: # x, y, dx, dy error bar signal\n x, y, dx, dy = self.xydata\n item = make.merror(x.real, y.real, dx.real, dy.real, label=self.title)\n CurveStyles.apply_style(item.param)\n else:\n raise RuntimeError(\"data not supported\")\n if update_from is None:\n if execenv.demo_mode:\n item.param.line.width = 3\n self.update_plot_item_parameters(item)\n else:\n update_dataset(item.param, update_from.param)\n item.update_params()\n return item\n\n def update_item(self, item: CurveItem, data_changed: bool = True) -> None:\n \"\"\"Update plot item from data.\n\n Args:\n item (CurveItem): plot item\n data_changed (bool): if True, data has changed\n \"\"\"\n if data_changed:\n if len(self.xydata) == 2: # x, y signal\n x, y = self.xydata\n item.set_data(x.real, y.real)\n elif len(self.xydata) == 3: # x, y, dy error bar signal\n x, y, dy = self.xydata\n item.set_data(x.real, y.real, dy=dy.real)\n elif len(self.xydata) == 4: # x, y, dx, dy error bar signal\n x, y, dx, dy = self.xydata\n item.set_data(x.real, y.real, dx.real, dy.real)\n item.param.label = self.title\n self.update_plot_item_parameters(item)\n\n def roi_coords_to_indexes(self, coords: list) -> np.ndarray:\n \"\"\"Convert ROI coordinates to indexes.\n\n Args:\n coords (list): coordinates\n\n Returns:\n numpy.ndarray: indexes\n \"\"\"\n indexes = np.array(coords, int)\n for row in range(indexes.shape[0]):\n for col in range(indexes.shape[1]):\n x0 = coords[row][col]\n indexes[row, col] = np.abs(self.x - x0).argmin()\n return indexes\n\n def get_roi_param(self, title: str, *defaults) -> gds.DataSet:\n \"\"\"Return ROI parameters dataset.\n\n Args:\n title (str): title\n *defaults: default values\n \"\"\"\n imax = len(self.x) - 1\n i0, i1 = defaults\n param = ROIParam(title)\n param.col1 = i0\n param.col2 = i1\n param.set_global_prop(\"data\", min=-1, max=imax)\n return param\n\n @staticmethod\n def params_to_roidata(params: gds.DataSetGroup) -> np.ndarray:\n \"\"\"Convert ROI dataset group to ROI array data.\n\n Args:\n params (DataSetGroup): ROI dataset group\n\n Returns:\n numpy.ndarray: ROI array data\n \"\"\"\n roilist = []\n for roiparam in 
params.datasets:\n roiparam: ROIParam\n roilist.append([roiparam.col1, roiparam.col2])\n if len(roilist) == 0:\n return None\n return np.array(roilist, int)\n\n def new_roi_item(self, fmt: str, lbl: bool, editable: bool):\n \"\"\"Return a new ROI item from scratch\n\n Args:\n fmt (str): format string\n lbl (bool): if True, add label\n editable (bool): if True, ROI is editable\n \"\"\"\n coords = self.x.min(), self.x.max()\n return base.make_roi_item(\n lambda x, y, _title: make.range(x, y),\n coords,\n \"ROI\",\n fmt,\n lbl,\n editable,\n option=\"shape/drag\",\n )\n\n def iterate_roi_items(self, fmt: str, lbl: bool, editable: bool = True):\n \"\"\"Make plot item representing a Region of Interest.\n\n Args:\n fmt (str): format string\n lbl (bool): if True, add label\n editable (bool): if True, ROI is editable\n\n Yields:\n PlotItem: plot item\n \"\"\"\n if self.roi is not None:\n for index, coords in enumerate(self.x[self.roi]):\n yield base.make_roi_item(\n lambda x, y, _title: make.range(x, y),\n coords,\n f\"ROI{index:02d}\",\n fmt,\n lbl,\n editable,\n option=\"shape/drag\",\n )\n\n def add_label_with_title(self, title: str | None = None) -> None:\n \"\"\"Add label with title annotation\n\n Args:\n title (str): title (if None, use signal title)\n \"\"\"\n title = self.title if title is None else title\n if title:\n label = make.label(title, \"TL\", (0, 0), \"TL\")\n self.add_annotations_from_items([label])" }, { "identifier": "block_signals", "path": "cdl/utils/qthelpers.py", "snippet": "@contextmanager\ndef block_signals(widget: QW.QWidget, enable: bool) -> Generator[None, None, None]:\n \"\"\"Eventually block/unblock widget Qt signals before/after doing some things\n (enable: True if feature is enabled)\"\"\"\n if enable:\n widget.blockSignals(True)\n try:\n yield\n finally:\n if enable:\n widget.blockSignals(False)" } ]
import os from collections.abc import Iterator from typing import TYPE_CHECKING from guidata.configtools import get_icon from qtpy import QtCore as QC from qtpy import QtGui as QG from qtpy import QtWidgets as QW from cdl.config import _ from cdl.core.gui.objectmodel import ObjectGroup from cdl.core.model.image import ImageObj from cdl.core.model.signal import SignalObj from cdl.utils.qthelpers import block_signals from cdl.core.gui.objectmodel import ObjectModel from cdl.core.gui.panel.base import BaseDataPanel
11256
.. autosummary:: :toctree: SimpleObjectTree GetObjectDialog ObjectView .. autoclass:: SimpleObjectTree :members: .. autoclass:: GetObjectDialog :members: .. autoclass:: ObjectView :members: .. note:: This module provides tree widgets to display signals, images and groups. It is important to note that, by design, the user can only select either individual signals/images or groups, but not both at the same time. This is an important design choice, as it allows to simplify the user experience, and to avoid potential confusion between the two types of selection. """ # pylint: disable=invalid-name # Allows short reference names like x, y, ... from __future__ import annotations if TYPE_CHECKING: # pragma: no cover class SimpleObjectTree(QW.QTreeWidget): """Base object handling panel list widget, object (sig/ima) lists""" SIG_ITEM_DOUBLECLICKED = QC.Signal(str) SIG_CONTEXT_MENU = QC.Signal(QC.QPoint) def __init__(self, parent: QW.QWidget, objmodel: ObjectModel) -> None: self.objmodel: ObjectModel = objmodel super().__init__(parent) self.setHeaderHidden(True) self.setColumnCount(1) self.setAlternatingRowColors(True) self.itemDoubleClicked.connect(self.item_double_clicked) def __str__(self) -> str: """Return string representation""" textlist = [] for tl_index in range(self.topLevelItemCount()): tl_item = self.topLevelItem(tl_index) textlist.append(tl_item.text(0)) for index in range(tl_item.childCount()): textlist.append(" " + tl_item.child(index).text(0)) return os.linesep.join(textlist) def initialize_from(self, sobjlist: SimpleObjectTree) -> None: """Init from another SimpleObjectList, without making copies of objects""" self.objmodel = sobjlist.objmodel self.populate_tree() self.set_current_item_id(sobjlist.get_current_item_id()) def iter_items( self, item: QW.QTreeWidgetItem | None = None ) -> Iterator[QW.QTreeWidgetItem]: """Recursively iterate over all items""" if item is None: for index in range(self.topLevelItemCount()): yield from self.iter_items(self.topLevelItem(index)) else: yield item for index in range(item.childCount()): yield from self.iter_items(item.child(index)) def get_item_from_id(self, item_id) -> QW.QTreeWidgetItem: """Return QTreeWidgetItem from id (stored in item's data)""" for item in self.iter_items(): if item.data(0, QC.Qt.UserRole) == item_id: return item return None def get_current_item_id(self, object_only: bool = False) -> str | None: """Return current item id""" item = self.currentItem() if item is not None and (not object_only or item.parent() is not None): return item.data(0, QC.Qt.UserRole) return None def set_current_item_id(self, uuid: str, extend: bool = False) -> None: """Set current item by id""" item = self.get_item_from_id(uuid) if extend: self.setCurrentItem(item, 0, QC.QItemSelectionModel.Select) else: self.setCurrentItem(item) def get_current_group_id(self) -> str: """Return current group ID""" selected_item = self.currentItem() if selected_item is None: return None if selected_item.parent() is None: return selected_item.data(0, QC.Qt.UserRole) return selected_item.parent().data(0, QC.Qt.UserRole) @staticmethod def __update_item( item: QW.QTreeWidgetItem, obj: SignalObj | ImageObj | ObjectGroup ) -> None: """Update item""" item.setText(0, f"{obj.short_id}: {obj.title}") if isinstance(obj, (SignalObj, ImageObj)): item.setToolTip(0, obj.metadata_to_html()) item.setData(0, QC.Qt.UserRole, obj.uuid) def populate_tree(self) -> None: """Populate tree with objects""" uuid = self.get_current_item_id()
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """ Object (signal/image) view widgets ---------------------------------- This module provides widgets to display object (signal/image) trees. .. autosummary:: :toctree: SimpleObjectTree GetObjectDialog ObjectView .. autoclass:: SimpleObjectTree :members: .. autoclass:: GetObjectDialog :members: .. autoclass:: ObjectView :members: .. note:: This module provides tree widgets to display signals, images and groups. It is important to note that, by design, the user can only select either individual signals/images or groups, but not both at the same time. This is an important design choice, as it allows to simplify the user experience, and to avoid potential confusion between the two types of selection. """ # pylint: disable=invalid-name # Allows short reference names like x, y, ... from __future__ import annotations if TYPE_CHECKING: # pragma: no cover class SimpleObjectTree(QW.QTreeWidget): """Base object handling panel list widget, object (sig/ima) lists""" SIG_ITEM_DOUBLECLICKED = QC.Signal(str) SIG_CONTEXT_MENU = QC.Signal(QC.QPoint) def __init__(self, parent: QW.QWidget, objmodel: ObjectModel) -> None: self.objmodel: ObjectModel = objmodel super().__init__(parent) self.setHeaderHidden(True) self.setColumnCount(1) self.setAlternatingRowColors(True) self.itemDoubleClicked.connect(self.item_double_clicked) def __str__(self) -> str: """Return string representation""" textlist = [] for tl_index in range(self.topLevelItemCount()): tl_item = self.topLevelItem(tl_index) textlist.append(tl_item.text(0)) for index in range(tl_item.childCount()): textlist.append(" " + tl_item.child(index).text(0)) return os.linesep.join(textlist) def initialize_from(self, sobjlist: SimpleObjectTree) -> None: """Init from another SimpleObjectList, without making copies of objects""" self.objmodel = sobjlist.objmodel self.populate_tree() self.set_current_item_id(sobjlist.get_current_item_id()) def iter_items( self, item: QW.QTreeWidgetItem | None = None ) -> Iterator[QW.QTreeWidgetItem]: """Recursively iterate over all items""" if item is None: for index in range(self.topLevelItemCount()): yield from self.iter_items(self.topLevelItem(index)) else: yield item for index in range(item.childCount()): yield from self.iter_items(item.child(index)) def get_item_from_id(self, item_id) -> QW.QTreeWidgetItem: """Return QTreeWidgetItem from id (stored in item's data)""" for item in self.iter_items(): if item.data(0, QC.Qt.UserRole) == item_id: return item return None def get_current_item_id(self, object_only: bool = False) -> str | None: """Return current item id""" item = self.currentItem() if item is not None and (not object_only or item.parent() is not None): return item.data(0, QC.Qt.UserRole) return None def set_current_item_id(self, uuid: str, extend: bool = False) -> None: """Set current item by id""" item = self.get_item_from_id(uuid) if extend: self.setCurrentItem(item, 0, QC.QItemSelectionModel.Select) else: self.setCurrentItem(item) def get_current_group_id(self) -> str: """Return current group ID""" selected_item = self.currentItem() if selected_item is None: return None if selected_item.parent() is None: return selected_item.data(0, QC.Qt.UserRole) return selected_item.parent().data(0, QC.Qt.UserRole) @staticmethod def __update_item( item: QW.QTreeWidgetItem, obj: SignalObj | ImageObj | ObjectGroup ) -> None: """Update item""" item.setText(0, f"{obj.short_id}: {obj.title}") if isinstance(obj, (SignalObj, 
ImageObj)): item.setToolTip(0, obj.metadata_to_html()) item.setData(0, QC.Qt.UserRole, obj.uuid) def populate_tree(self) -> None: """Populate tree with objects""" uuid = self.get_current_item_id()
with block_signals(widget=self, enable=True):
4
2023-11-09 16:56:03+00:00
16k
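The target completion for the record above is `with block_signals(widget=self, enable=True):`, which wraps the tree repopulation so that no selection or change signals fire while items are rebuilt. The helper itself is not part of the shown context; a minimal, hypothetical sketch of such a context manager (the real cdl implementation may differ) is:

from contextlib import contextmanager

@contextmanager
def block_signals(widget, enable=True):
    """Hypothetical sketch: temporarily block Qt signals emitted by *widget*.

    The actual helper used by ``populate_tree`` lives in the source repository
    and may differ; this only illustrates the usual pattern around
    ``QObject.blockSignals``.
    """
    previous = widget.signalsBlocked()
    widget.blockSignals(enable)
    try:
        yield widget
    finally:
        widget.blockSignals(previous)

Used this way, populate_tree can clear and re-add items without triggering handlers connected to the tree mid-rebuild.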
lalalamdbf/PLSE_IDRR
src/prompt-tuning/prompt/pipeline_base.py
[ { "identifier": "InputExample", "path": "src/prompt-tuning/prompt/data_utils.py", "snippet": "class InputExample(object):\n \"\"\"A raw input example consisting of segments of text,\n a label for classification task or a target sequence of generation task.\n Other desired information can be passed via meta.\n\n Args:\n guid (:obj:`str`, optional): A unique identifier of the example.\n text_a (:obj:`str`, optional): The placeholder for sequence of text.\n text_b (:obj:`str`, optional): A secend sequence of text, which is not always necessary.\n label (:obj:`int`, optional): The label id of the example in classification task.\n tgt_text (:obj:`Union[str,List[str]]`, optional): The target sequence of the example in a generation task..\n meta (:obj:`Dict`, optional): An optional dictionary to store arbitrary extra information for the example.\n \"\"\"\n\n def __init__(self,\n guid = None,\n text_a = \"\",\n text_b = \"\",\n label = None,\n meta: Optional[Dict] = None,\n tgt_text: Optional[Union[str,List[str]]] = None\n ):\n\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n self.meta = meta if meta else {}\n self.tgt_text = tgt_text\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n r\"\"\"Serialize this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n r\"\"\"Serialize this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n def keys(self, keep_none=False):\n return [key for key in self.__dict__.keys() if getattr(self, key) is not None]\n\n @staticmethod\n def load_examples(path: str) -> List['InputExample']:\n \"\"\"Load a set of input examples from a file\"\"\"\n with open(path, 'rb') as fh:\n return pickle.load(fh)\n\n @staticmethod\n def save_examples(examples: List['InputExample'], path: str) -> None:\n \"\"\"Save a set of input examples to a file\"\"\"\n with open(path, 'wb') as fh:\n pickle.dump(examples, fh)" }, { "identifier": "InputFeatures", "path": "src/prompt-tuning/prompt/data_utils.py", "snippet": "class InputFeatures(dict):\n \"\"\"\n The class for input to the PLM and Prompts. To make users explicitly know the available keys,\n we define a dict with a set of predefined possible keys. The default value to any key is None.\n When use it as a dict, all the keys whose values are None are invisible.\n\n This class support most of the dict's operation (See Examples). It can also be consumed by\n pytorch's default_collate in DataLoader.\n Also a :py:meth:`to_tensor()` method is build to convert the values into torch.Tensor for torch's input.\n\n Examples:\n\n .. 
code-block:: python\n\n in_feat = InputFeatures(**{'input_ids':[1,4,5], 'soft_token_ids': [3,4,5]}) # init from dict\n print(in_feat.keys()) # ['input_ids, 'soft_token_ids']\n in_feat['label'] = 3 # can assign value like normal dict\n print(in_feat.keys()) # ['input_ids','label', 'soft_token_ids'] (Note that it's also ordered)\n print(in_feat['label']) # 3\n in_feat['alice'] = 0 # KeyError: Key alice not in predefined set of keys\n in_feat.values() # [[1,4,5], 3, [3,4,5]] (Note that it's also ordered)\n [in_feat[key] for key in in_feat] # [[1,4,5], 3, [3,4,5]]\n new_dict= {**in_feat, 'new_key':2} # new_dict is {'input_ids': [1, 4, 5], 'label': 3, 'soft_token_ids': [3, 4, 5], 'new_key': 2}\n\n Args:\n input_ids: Indices of input sequence tokens in the vocabulary.\n attention_mask: Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``: Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded)\n tokens.\n token_type_ids: (Optional) Segment token indices to indicate first and second\n portions of the inputs. Only some models use them.\n label: (Optional) Label corresponding to the input. Int for classification problems,\n float for regression problems.\n \"\"\"\n tensorable_keys = ['input_ids', 'inputs_embeds', 'attention_mask', 'token_type_ids', 'label',\n 'decoder_input_ids', 'decoder_inputs_embeds', 'soft_token_ids',\n 'past_key_values', 'loss_ids','conns_index']\n all_keys = ['input_ids', 'inputs_embeds', 'attention_mask', 'token_type_ids', 'label',\n 'decoder_input_ids', 'decoder_inputs_embeds', 'soft_token_ids',\n 'past_key_values', 'loss_ids','guid', 'tgt_text', 'encoded_tgt_text', 'input_ids_len','conns_index']\n non_tensorable_keys = []\n\n def __init__(self,\n input_ids: Optional[Union[List, torch.Tensor]] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n attention_mask: Optional[Union[List[int], torch.Tensor]] = None,\n token_type_ids: Optional[Union[List[int], torch.Tensor]] = None,\n label: Optional[Union[int, torch.Tensor]] = None,\n decoder_input_ids: Optional[Union[List, torch.Tensor]] = None,\n decoder_inputs_embeds: Optional[torch.Tensor] = None,\n soft_token_ids: Optional[Union[List, torch.Tensor]] = None,\n past_key_values: Optional[torch.Tensor] = None, # for prefix_tuning\n loss_ids: Optional[Union[List, torch.Tensor]] = None,\n guid: Optional[str] = None,\n tgt_text: Optional[str] = None,\n use_cache: Optional[bool] = None,\n encoded_tgt_text: Optional[str] = None,\n input_ids_len: Optional[int] = None,\n conns_index = None,\n **kwargs):\n\n self.input_ids = input_ids\n self.inputs_embeds = inputs_embeds\n self.attention_mask = attention_mask\n self.token_type_ids = token_type_ids\n self.label = label\n self.decoder_input_ids = decoder_input_ids\n self.decoder_inputs_embeds = decoder_inputs_embeds\n self.soft_token_ids = soft_token_ids\n self.past_key_values = past_key_values\n self.loss_ids = loss_ids\n self.guid = guid\n self.tgt_text = tgt_text\n self.encoded_tgt_text = encoded_tgt_text\n self.use_cache = use_cache\n self.input_ids_len = input_ids_len\n self.conns_index = conns_index\n\n for k in kwargs.keys():\n setattr(self, k, kwargs[k])\n\n @classmethod\n def add_tensorable_keys(cls, *args):\n cls.tensorable_keys.extend(args)\n\n @classmethod\n def add_not_tensorable_keys(cls, *args):\n cls.not_tensorable_keys.extend(args)\n\n @classmethod\n def add_keys(cls, *args):\n cls.all_keys.extend(args)\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def __len__(self):\n return 
len(self.keys())\n\n def to_tensor(self, device: str = 'cuda'):\n \"\"\"inplace operation, convert all tensorable features into :obj:`torch.tensor`\"\"\"\n for key in self.tensorable_keys:\n value = getattr(self, key)\n if value is not None:\n setattr(self, key, torch.tensor(value))\n return self\n\n def to(self, device: str = \"cuda:0\"):\n r\"\"\"move the tensor keys to runtime device, such as gpu:0\n \"\"\"\n for key in self.tensorable_keys:\n value = getattr(self, key)\n if value is not None:\n setattr(self, key, value.to(device))\n return self\n\n def cuda(self, device: str = \"cuda:0\"):\n r\"\"\"mimic the tensor behavior\n \"\"\"\n return self.to(device)\n\n def to_json_string(self, keep_none=False):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n data = {}\n for key in self.all_keys:\n value = getattr(self, key)\n if isinstance(value, torch.Tensor):\n data[key] = value.detach().cpu().tolist()\n elif value is None and keep_none:\n data[key] = None\n else:\n data[key] = value\n return json.dumps(data) + \"\\n\"\n\n def keys(self, keep_none=False) -> List[str]:\n \"\"\"get all keys of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.\n\n Returns:\n :obj:`List[str]`: keys of the InputFeatures\n \"\"\"\n if keep_none:\n return self.all_keys\n else:\n return [key for key in self.all_keys if getattr(self, key) is not None]\n\n def to_dict(self, keep_none=False) -> Dict[str, Any]:\n \"\"\"get the dict of mapping from keys to values of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.\n\n Returns:\n :obj:`Dict[str, Any]`: dict of mapping from keys to values of the InputFeatures\n \"\"\"\n data = {}\n for key in self.all_keys:\n value = getattr(self, key)\n if value is not None:\n data[key] = value\n elif value is None and keep_none:\n data[key] = None\n return data\n\n def __getitem__(self, key):\n return getattr(self, key)\n\n def __iter__(self):\n return iter(self.keys())\n\n def __setitem__(self, key, item):\n if key not in self.all_keys:\n raise KeyError(\"Key {} not in predefined set of keys\".format(key))\n setattr(self, key, item)\n\n def values(self, keep_none=False) -> List[Any]:\n \"\"\"get the values with respect to the keys of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.\n\n Returns:\n :obj:`List[Any]`: the values with respect to the keys of the InputFeatures\n \"\"\"\n return [getattr(self, key) for key in self.keys(keep_none=keep_none)]\n\n def __contains__(self, key, keep_none=False):\n return key in self.keys(keep_none)\n\n def items(self,):\n \"\"\"get the (key, value) pairs of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. 
Defaults to False.\n\n Returns:\n :obj:`List[Any]`: the (key, value) pairs of the InputFeatures\n \"\"\"\n return [(key, self.__getitem__(key)) for key in self.keys()]\n\n @staticmethod\n def collate_fct(batch: List):\n r'''\n This function is used to collate the input_features.\n\n Args:\n batch (:obj:`List[Union[Dict, InputFeatures]]`): A batch of the current data.\n\n Returns:\n :obj:`InputFeatures`: Return the :py:class:`~openprompt.data_utils.data_utils.InputFeatures of the current batch of data.\n '''\n\n\n elem = batch[0]\n return_dict = {}\n for key in elem:\n if key == \"encoded_tgt_text\":\n return_dict[key] = [d[key] for d in batch]\n else:\n try:\n return_dict[key] = default_collate([d[key] for d in batch])\n except:\n print(f\"key{key}\\n d {[batch[i][key] for i in range(len(batch))]} \")\n\n return InputFeatures(**return_dict)" }, { "identifier": "TokenizerWrapper", "path": "src/prompt-tuning/prompt/utils.py", "snippet": "class TokenizerWrapper:\n def __init__(self,\n max_seq_length: int,\n tokenizer: PreTrainedTokenizer,\n # truncate_method: Optional[str] = 'tail',\n create_token_type_ids: Optional[str] = False,\n segment_emb: Optional[str] = False,\n **kwargs):\n self.max_seq_length = max_seq_length\n\n self.tokenizer = tokenizer\n self.truncate_fct = self.truncate_from_tail\n\n self.create_token_type_ids = create_token_type_ids\n self.segment_emb = segment_emb\n\n self.template_mask_token = '<mask>'\n # self.template_eos_token = '<eos>'\n # self.template_bos_token = '<bos>'\n self.template_sep_token = '<sep>'\n self.template_cls_token = '<cls>'\n self.template_pad_token = '<pad>'\n\n from transformers import logging\n verbosity_before = logging.get_verbosity()\n logging.set_verbosity(logging.CRITICAL) # TODO solve this in a more elegant way\n self.mask_token_map = {self.template_mask_token: self.tokenizer.mask_token if hasattr(self.tokenizer, 'mask_token') else ''}\n # self.eos_token_map = {self.template_eos_token: self.tokenizer.eos_token if hasattr(self.tokenizer, 'eos_token') else ''}\n # self.bos_token_map = {self.template_bos_token: self.tokenizer.bos_token if hasattr(self.tokenizer, 'bos_token') else ''}\n self.sep_token_map = {self.template_sep_token: self.tokenizer.sep_token if hasattr(self.tokenizer, 'sep_token') else ''}\n self.cls_token_map = {self.template_cls_token: self.tokenizer.cls_token if hasattr(self.tokenizer, 'cls_token') else ''}\n self.pad_token_map = {self.template_pad_token: self.tokenizer.pad_token if hasattr(self.tokenizer, 'pad_token') else ''}\n logging.set_verbosity(verbosity_before)\n\n self.num_truncated_sentences = 0\n self.total_passed_sentences = 0\n\n @property\n def truncate_rate(self,):\n r\"\"\"Using this function, one can easily identify how many sentence has be truncated, thus help the user to choose a better thresthold for chunking.\n \"\"\"\n if self.total_passed_sentences==0:\n return None\n else:\n return self.num_truncated_sentences/self.total_passed_sentences\n\n @property\n def special_tokens_maps(self,) -> Dict:\n r\"\"\"This need to be specified in specific language model\n \"\"\"\n if not hasattr(self, \"_special_tokens_map\"):\n _special_tokens_map = {}\n for attrname in self.__dict__.keys():\n if attrname.endswith('_token_map'):\n _special_tokens_map.update(getattr(self, attrname))\n return _special_tokens_map\n\n def tokenize_with_mask(self,\n wrapped_example: List[Dict],\n ) -> InputFeatures:\n raise NotImplementedError\n\n def tokenize_without_mask(self,\n wrapped_example: List[Dict],\n ) -> InputFeatures:\n raise 
NotImplementedError\n\n\n @staticmethod\n def truncate_from_tail(input_dict: Dict,\n num_tokens_to_truncate: int=0) -> Dict:\n r\"\"\"truncate the inputs from the rear\n \"\"\"\n truncated_example = defaultdict(list)\n shortenable_ids = input_dict['shortenable_ids']\n for key in input_dict:\n parts = input_dict[key]\n to_trunc = num_tokens_to_truncate\n for i, part in enumerate(parts[::-1]):\n if len(part) == 0: # to prevent some part are empty after tokenization\n continue\n if shortenable_ids[-1-i][0]==0: # ==0 means the part is not shortenable\n continue\n parts[-1-i] = part[:-to_trunc] if to_trunc<len(part) else []\n to_trunc -= len(part)\n if to_trunc <= 0:\n break\n truncated_example[key] = parts\n return truncated_example\n\n\n @staticmethod\n def concate_parts(input_dict: Dict) -> Dict:\n for key in input_dict:\n input_dict[key] = list(itertools.chain(*input_dict[key]))\n return input_dict\n\n @staticmethod\n def padding(input_dict: Dict,\n max_len: int, pad_id_for_inputs: int=0, pad_id_for_others: int=0) -> None:\n for key, value in input_dict.items():\n if (len(input_dict[key]) > max_len):\n raise ValueError(f'''Truncated seq length of '{key}' still greater than max length {max_len}.\"\\\n \"One possible reason is that no enough shortenable parts in template. Try adding {{\"shortenable\": \"True\"}} property.\n ''')\n if 'input' in key:\n input_dict[key].extend([pad_id_for_inputs]*(max_len-len(value)))\n else:\n input_dict[key].extend([pad_id_for_others]*(max_len-len(value)))\n return input_dict\n\n\n def add_special_tokens(self, encoder_inputs):\n # add special tokens\n for key in encoder_inputs:\n if key == \"input_ids\":\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n encoder_inputs[key] = self.tokenizer.build_inputs_with_special_tokens(\n encoder_inputs[key])\n else:\n special_tokens_mask = np.array(self.tokenizer.get_special_tokens_mask(encoder_inputs[key]))\n with_special_tokens = np.array(self.tokenizer.build_inputs_with_special_tokens(encoder_inputs[key]))\n if key in [\"soft_token_ids\"]: # TODO maybe more than this\n encoder_inputs[key] = ((1-special_tokens_mask) * with_special_tokens).tolist() # use 0 as special\n else:\n encoder_inputs[key] = ((1-special_tokens_mask) * with_special_tokens - special_tokens_mask*100).tolist() # use -100 as special\n return encoder_inputs\n\n def truncate(self, encoder_inputs):\n total_tokens = sum([len(part) for part in encoder_inputs['input_ids']])\n num_specials = self.num_special_tokens_to_add\n num_tokens_to_truncate = total_tokens - self.max_seq_length + num_specials\n self.total_passed_sentences+=1\n if num_tokens_to_truncate>0:\n self.num_truncated_sentences += 1\n encoder_inputs = self.truncate_fct(input_dict=encoder_inputs,\n num_tokens_to_truncate=num_tokens_to_truncate)\n return encoder_inputs" }, { "identifier": "Template", "path": "src/prompt-tuning/prompt/prompt_base.py", "snippet": "class Template(nn.Module):\n r'''\n Base class for all the templates.\n Most of methods are abstract, with some exceptions to hold the common methods for all template, such as ``loss_ids``, ``save``, ``load``.\n\n Args:\n tokenizer (:obj:`PreTrainedTokenizer`): A tokenizer to appoint the vocabulary and the tokenization strategy.\n placeholder_mapping (:obj:`dict`): A place holder to represent the original input text.\n '''\n\n registered_inputflag_names = [\"loss_ids\", \"shortenable_ids\"]\n\n def __init__(self,\n tokenizer: PreTrainedTokenizer,\n placeholder_mapping: dict = {'<text_a>':'text_a','<text_b>':'text_b'},\n 
):\n super().__init__()\n self.tokenizer = tokenizer\n self.placeholder_mapping = placeholder_mapping\n self._in_on_text_set = False\n\n self.mixed_token_start = \"{\"\n self.mixed_token_end = \"}\"\n\n\n def get_default_loss_ids(self) -> List[int]:\n '''Get the loss indices for the template using mask.\n e.g. when self.text is ``'{\"placeholder\": \"text_a\"}. {\"meta\": \"word\"} is {\"mask\"}.'``,\n output is ``[0, 0, 0, 0, 1, 0]``.\n\n Returns:\n :obj:`List[int]`: A list of integers in the range [0, 1]:\n\n - 1 for a masked tokens.\n - 0 for a sequence tokens.\n '''\n return [1 if 'mask' in d else 0 for d in self.text]\n\n def get_default_shortenable_ids(self) -> List[int]:\n \"\"\"Every template needs shortenable_ids, denoting which part of the template can be truncate to fit\n the language model's ``max_seq_length``. Default: the input text is shortenable, while the template text and other\n special tokens are not shortenable.\n\n e.g. when self.text is ``'{\"placeholder\": \"text_a\"} {\"placeholder\": \"text_b\", \"shortenable\": False} {\"meta\": \"word\"} is {\"mask\"}.'``,\n output is ``[1, 0, 0, 0, 0, 0, 0]``.\n\n Returns:\n :obj:`List[int]`: A list of integers in the range ``[0, 1]``:\n\n - 1 for the input tokens.\n - 0 for the template sequence tokens.\n \"\"\"\n idx = []\n for d in self.text:\n if 'shortenable' in d:\n idx.append(1 if d['shortenable'] else 0)\n else:\n idx.append(1 if 'placeholder' in d else 0)\n return idx\n\n def get_default_soft_token_ids(self) -> List[int]:\n r'''\n This function identifies which tokens are soft tokens.\n\n Sometimes tokens in the template are not from the vocabulary,\n but a sequence of soft tokens.\n In this case, you need to implement this function\n\n Raises:\n NotImplementedError: if needed, add ``soft_token_ids`` into ``registered_inputflag_names`` attribute of Template class and implement this method.\n '''\n raise NotImplementedError\n\n def incorporate_text_example(self,\n example: InputExample,\n text = None,\n ):\n if text is None:\n text = self.text.copy()\n else:\n text = text.copy()\n\n for i, d in enumerate(text):\n if 'placeholder' in d:\n text[i] = d[\"add_prefix_space\"] + d.get(\"post_processing\", lambda x:x)(getattr(example, d['placeholder']))\n elif 'meta' in d:\n text[i] = d[\"add_prefix_space\"] + d.get(\"post_processing\", lambda x:x)(example.meta[d['meta']])\n elif 'soft' in d:\n text[i] = ''; # unused\n elif 'mask' in d:\n text[i] = '<mask>'\n elif 'special' in d:\n text[i] = d['special']\n elif 'text' in d:\n text[i] = d[\"add_prefix_space\"] + d['text']\n else:\n raise ValueError(f'can not parse {d}')\n return text\n\n def _check_template_format(self, ):\n r\"\"\"check whether the template format is correct.\n TODO: add more\n \"\"\"\n mask_num = 0\n for i, d in enumerate(self.text):\n if 'mask' in d:\n mask_num += 1\n\n if mask_num==0:\n raise RuntimeError(f\"'mask' position not found in the template: {self.text}. 
Please Check!\")\n\n\n\n\n def parse_text(self, text: str) -> List[Dict]:\n parsed = []\n i = 0\n while i < len(text):\n d = {\"add_prefix_space\": ' ' if (i > 0 and text[i-1] == ' ') else ''}\n while i < len(text) and text[i] == ' ':\n d[\"add_prefix_space\"] = ' '\n i = i + 1\n if i == len(text): break\n\n if text[i] != self.mixed_token_start:\n j = i + 1\n while j < len(text):\n if text[j] == self.mixed_token_start:\n break\n j = j + 1\n d[\"text\"] = text[i:j].rstrip(' ')\n i = j\n\n else:\n j = i + 1\n mixed_token_cnt = 1 # { {} {} } nested support\n while j < len(text):\n if text[j] == self.mixed_token_end:\n mixed_token_cnt -= 1\n if mixed_token_cnt == 0: break\n elif text[j] == self.mixed_token_start:\n mixed_token_cnt += 1\n j = j + 1\n if j == len(text):\n raise ValueError(f\"mixed_token_start {self.mixed_token_start} at position {i} has no corresponding mixed_token_end {self.mixed_token_end}\")\n dict_str = '{'+text[i+1:j]+'}'\n try:\n val = eval(dict_str)\n if isinstance(val, set):\n val = {k: None for k in val}\n d.update(val)\n except:\n import traceback\n print(traceback.format_exc())\n print(f\"syntax error in {dict_str}\")\n exit()\n i = j + 1\n\n parsed.append(d)\n\n return parsed\n\n # @abstractmethod\n def wrap_one_example(self,\n example: InputExample) -> List[Dict]:\n r'''Given an input example which contains input text, which can be referenced\n by self.template.placeholder_mapping 's value.\n This function process the example into a list of dict,\n Each dict functions as a group, which has the sample properties, such as\n whether it's shortenable, whether it's the masked position, whether it's soft token, etc.\n Since a text will be tokenized in the subsequent processing procedure,\n these attributes are broadcasted along the tokenized sentence.\n\n Args:\n example (:obj:`InputExample`): An :py:class:`~openprompt.data_utils.data_utils.InputExample` object, which should have attributes that are able to be filled in the template.\n\n Returns:\n :obj:`List[Dict]`: A list of dict of the same length as self.text. e.g. ``[{\"loss_ids\": 0, \"text\": \"It was\"}, {\"loss_ids\": 1, \"text\": \"<mask>\"}, ]``\n '''\n\n if self.text is None:\n raise ValueError(\"template text has not been initialized\")\n if isinstance(example, InputExample):\n text = self.incorporate_text_example(example)\n\n not_empty_keys = example.keys()\n for placeholder_token in self.placeholder_mapping:\n not_empty_keys.remove(self.placeholder_mapping[placeholder_token]) # placeholder has been processed, remove\n not_empty_keys.remove('meta') # meta has been processed\n\n keys, values= ['text'], [text]\n for inputflag_name in self.registered_inputflag_names:\n keys.append(inputflag_name)\n v = None\n if hasattr(self, inputflag_name) and getattr(self, inputflag_name) is not None:\n v = getattr(self, inputflag_name)\n elif hasattr(self, \"get_default_\"+inputflag_name):\n v = getattr(self, \"get_default_\"+inputflag_name)()\n setattr(self, inputflag_name, v) # cache\n else:\n raise ValueError(\"\"\"\n Template's inputflag '{}' is registered but not initialize.\n Try using template.{} = [...] 
to initialize\n or create an method get_default_{}(self) in your template.\n \"\"\".format(inputflag_name, inputflag_name, inputflag_name))\n\n if len(v) != len(text):\n raise ValueError(\"Template: len({})={} doesn't match len(text)={}.\"\\\n .format(inputflag_name, len(v), len(text)))\n values.append(v)\n wrapped_parts_to_tokenize = []\n for piece in list(zip(*values)):\n wrapped_parts_to_tokenize.append(dict(zip(keys, piece)))\n\n wrapped_parts_not_tokenize = {key: getattr(example, key) for key in not_empty_keys}\n return [wrapped_parts_to_tokenize, wrapped_parts_not_tokenize]\n else:\n raise TypeError(\"InputExample\")\n\n @abstractmethod\n def process_batch(self, batch):\n r\"\"\"Template should rewrite this method if you need to process the batch input such as substituting embeddings.\n \"\"\"\n return batch # not being processed\n\n def post_processing_outputs(self, outputs):\n r\"\"\"Post processing the outputs of language models according\n to the need of template. Most templates don't need post processing,\n The template like SoftTemplate, which appends soft template as a module\n (rather than a sequence of input tokens) to the input,\n should remove the outputs on these positions to keep the seq_len the same\n \"\"\"\n return outputs\n\n def save(self,\n path: str,\n **kwargs) -> None:\n r'''\n A save method API.\n\n Args:\n path (str): A path to save your template.\n '''\n raise NotImplementedError\n\n @property\n def text(self):\n return self._text\n\n @text.setter\n def text(self, text):\n self._text = text\n if text is None:\n return\n if not self._in_on_text_set:\n self.safe_on_text_set()\n self._check_template_format()\n\n def safe_on_text_set(self) -> None:\n r\"\"\"With this wrapper function, setting text inside ``on_text_set()``\n will not trigger ``on_text_set()`` again to prevent endless recursion.\n \"\"\"\n self._in_on_text_set = True\n self.on_text_set()\n self._in_on_text_set = False\n\n @abstractmethod\n def on_text_set(self):\n r\"\"\"\n A hook to do something when template text was set.\n The designer of the template should explicitly know what should be down when the template text is set.\n \"\"\"\n raise NotImplementedError\n\n def from_file(self,\n path: str,\n choice: int = 0,\n ):\n r'''\n Read the template from a local file.\n\n Args:\n path (:obj:`str`): The path of the local template file.\n choice (:obj:`int`): The id-th line of the file.\n '''\n with open(path, 'r') as fin:\n text = fin.readlines()[choice].rstrip()\n self.text = text\n return self" }, { "identifier": "Verbalizer", "path": "src/prompt-tuning/prompt/prompt_base.py", "snippet": "class Verbalizer(nn.Module):\n r'''\n Base class for all the verbalizers.\n\n Args:\n tokenizer (:obj:`PreTrainedTokenizer`): A tokenizer to appoint the vocabulary and the tokenization strategy.\n classes (:obj:`Sequence[str]`): A sequence of classes that need to be projected.\n '''\n def __init__(self,\n tokenizer: Optional[PreTrainedTokenizer] = None,\n classes: Optional[Sequence[str]] = None,\n num_classes: Optional[int] = None,\n ):\n super().__init__()\n self.tokenizer = tokenizer\n self.classes = classes\n if classes is not None and num_classes is not None:\n assert len(classes) == num_classes, \"len(classes) != num_classes, Check you config.\"\n self.num_classes = num_classes\n elif num_classes is not None:\n self.num_classes = num_classes\n elif classes is not None:\n self.num_classes = len(classes)\n else:\n self.num_classes = None\n # raise AttributeError(\"No able to configure num_classes\")\n 
self._in_on_label_words_set = False\n\n @property\n def label_words(self,):\n r'''\n Label words means the words in the vocabulary projected by the labels.\n E.g. if we want to establish a projection in sentiment classification: positive :math:`\\rightarrow` {`wonderful`, `good`},\n in this case, `wonderful` and `good` are label words.\n '''\n if not hasattr(self, \"_label_words\"):\n raise RuntimeError(\"label words haven't been set.\")\n return self._label_words\n\n @label_words.setter\n def label_words(self, label_words):\n if label_words is None:\n return\n self._label_words = self._match_label_words_to_label_ids(label_words)\n if not self._in_on_label_words_set:\n self.safe_on_label_words_set()\n\n def _match_label_words_to_label_ids(self, label_words): # TODO newly add function after docs written # TODO rename this function\n \"\"\"\n sort label words dict of verbalizer to match the label order of the classes\n \"\"\"\n if isinstance(label_words, dict):\n if self.classes is None:\n raise ValueError(\"\"\"\n classes attribute of the Verbalizer should be set since your given label words is a dict.\n Since we will match the label word with respect to class A, to A's index in classes\n \"\"\")\n if set(label_words.keys()) != set(self.classes):\n raise ValueError(\"name of classes in verbalizer are different from those of dataset\")\n label_words = [ # sort the dict to match dataset\n label_words[c]\n for c in self.classes\n ] # length: label_size of the whole task\n elif isinstance(label_words, list) or isinstance(label_words, tuple):\n pass\n # logger.info(\"\"\"\n # Your given label words is a list, by default, the ith label word in the list will match class i of the dataset.\n # Please make sure that they have the same order.\n # Or you can pass label words as a dict, mapping from class names to label words.\n # \"\"\")\n else:\n raise ValueError(\"Verbalizer label words must be list, tuple or dict\")\n return label_words\n\n def safe_on_label_words_set(self,):\n self._in_on_label_words_set = True\n self.on_label_words_set()\n self._in_on_label_words_set = False\n\n def on_label_words_set(self,):\n r\"\"\"A hook to do something when textual label words were set.\n \"\"\"\n pass\n\n @property\n def vocab(self,) -> Dict:\n if not hasattr(self, '_vocab'):\n self._vocab = self.tokenizer.convert_ids_to_tokens(np.arange(self.vocab_size).tolist())\n return self._vocab\n\n @property\n def vocab_size(self,) -> int:\n return self.tokenizer.vocab_size\n\n @abstractmethod\n def generate_parameters(self, **kwargs) -> List:\n r\"\"\"\n The verbalizer can be seen as an extra layer on top of the original\n pre-trained models. 
In manual verbalizer, it is a fixed one-hot vector of dimension\n ``vocab_size``, with the position of the label word being 1 and 0 everywhere else.\n In other situation, the parameters may be a continuous vector over the\n vocab, with each dimension representing a weight of that token.\n Moreover, the parameters may be set to trainable to allow label words selection.\n\n Therefore, this function serves as an abstract methods for generating the parameters\n of the verbalizer, and must be instantiated in any derived class.\n\n Note that the parameters need to be registered as a part of pytorch's module to\n It can be achieved by wrapping a tensor using ``nn.Parameter()``.\n \"\"\"\n raise NotImplementedError\n\n def register_calibrate_logits(self, logits: torch.Tensor):\n r\"\"\"\n This function aims to register logits that need to be calibrated, and detach the original logits from the current graph.\n \"\"\"\n if logits.requires_grad:\n logits = logits.detach()\n self._calibrate_logits = logits\n\n def process_outputs(self,\n outputs: torch.Tensor,\n conn_linear_logits = None, \n **kwargs):\n r\"\"\"By default, the verbalizer will process the logits of the PLM's\n output.\n\n Args:\n logits (:obj:`torch.Tensor`): The current logits generated by pre-trained language models.\n batch (:obj:`Union[Dict, InputFeatures]`): The input features of the data.\n \"\"\"\n if conn_linear_logits != None:\n return self.process_logits(outputs, conn_linear_logits, **kwargs)\n else:\n return self.process_logits(outputs, **kwargs)\n\n def gather_outputs(self, outputs: ModelOutput):\n r\"\"\" retrieve useful output for the verbalizer from the whole model output\n By default, it will only retrieve the logits\n\n Args:\n outputs (:obj:`ModelOutput`) The output from the pretrained language model.\n\n Return:\n :obj:`torch.Tensor` The gathered output, should be of shape (``batch_size``,\n ``seq_len``, ``any``)\n \"\"\"\n return outputs.logits\n\n @staticmethod\n def aggregate(label_words_logits: torch.Tensor) -> torch.Tensor:\n r\"\"\" To aggregate logits on multiple label words into the label's logits\n Basic aggregator: mean of each label words' logits to a label's logits\n Can be re-implemented in advanced verbaliezer.\n\n Args:\n label_words_logits (:obj:`torch.Tensor`): The logits of the label words only.\n\n Return:\n :obj:`torch.Tensor`: The final logits calculated by the label words.\n \"\"\"\n if label_words_logits.dim()>2:\n return label_words_logits.mean(dim=-1)\n else:\n return label_words_logits\n\n\n def normalize(self, logits: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n Given logits regarding the entire vocab, calculate the probs over the label words set by softmax.\n\n Args:\n logits(:obj:`Tensor`): The logits of the entire vocab.\n\n Returns:\n :obj:`Tensor`: The probability distribution over the label words set.\n \"\"\"\n batch_size = logits.shape[0]\n return F.softmax(logits.reshape(batch_size, -1), dim=-1).reshape(*logits.shape)\n\n @abstractmethod\n def project(self,\n logits: torch.Tensor,\n **kwargs) -> torch.Tensor:\n r\"\"\"This method receives input logits of shape ``[batch_size, vocab_size]``, and use the\n parameters of this verbalizer to project the logits over entire vocab into the\n logits of labels words.\n\n Args:\n logits (:obj:`Tensor`): The logits over entire vocab generated by the pre-trained language model with shape [``batch_size``, ``max_seq_length``, ``vocab_size``]\n\n Returns:\n :obj:`Tensor`: The normalized probs (sum to 1) of each label .\n \"\"\"\n raise 
NotImplementedError\n\n def handle_multi_token(self, label_words_logits, mask):\n r\"\"\"\n Support multiple methods to handle the multi tokens produced by the tokenizer.\n We suggest using 'first' or 'max' if the some parts of the tokenization is not meaningful.\n Can broadcast to 3-d tensor.\n\n Args:\n label_words_logits (:obj:`torch.Tensor`):\n\n Returns:\n :obj:`torch.Tensor`\n \"\"\"\n if self.multi_token_handler == \"first\":\n label_words_logits = label_words_logits.select(dim=-1, index=0)\n elif self.multi_token_handler == \"max\":\n label_words_logits = label_words_logits - 1000*(1-mask.unsqueeze(0))\n label_words_logits = label_words_logits.max(dim=-1).values\n elif self.multi_token_handler == \"mean\":\n label_words_logits = (label_words_logits*mask.unsqueeze(0)).sum(dim=-1)/(mask.unsqueeze(0).sum(dim=-1)+1e-15)\n else:\n raise ValueError(\"multi_token_handler {} not configured\".format(self.multi_token_handler))\n return label_words_logits\n\n @classmethod\n \n\n def from_file(self,\n path: str,\n choice: Optional[int] = 0 ):\n r\"\"\"Load the predefined label words from verbalizer file.\n Currently support three types of file format:\n 1. a .jsonl or .json file, in which is a single verbalizer\n in dict format.\n 2. a .jsonal or .json file, in which is a list of verbalizers in dict format\n 3. a .txt or a .csv file, in which is the label words of a class are listed in line,\n separated by commas. Begin a new verbalizer by an empty line.\n This format is recommended when you don't know the name of each class.\n\n The details of verbalizer format can be seen in :ref:`How_to_write_a_verbalizer`.\n\n Args:\n path (:obj:`str`): The path of the local template file.\n choice (:obj:`int`): The choice of verbalizer in a file containing\n multiple verbalizers.\n\n Returns:\n Template : `self` object\n \"\"\"\n if path.endswith(\".txt\") or path.endswith(\".csv\"):\n with open(path, 'r') as f:\n lines = f.readlines()\n label_words_all = []\n label_words_single_group = []\n for line in lines:\n line = line.strip().strip(\" \")\n if line == \"\":\n if len(label_words_single_group)>0:\n label_words_all.append(label_words_single_group)\n label_words_single_group = []\n else:\n label_words_single_group.append(line)\n if len(label_words_single_group) > 0: # if no empty line in the last\n label_words_all.append(label_words_single_group)\n if choice >= len(label_words_all):\n raise RuntimeError(\"choice {} exceed the number of verbalizers {}\"\n .format(choice, len(label_words_all)))\n\n label_words = label_words_all[choice]\n label_words = [label_words_per_label.strip().split(\",\") \\\n for label_words_per_label in label_words]\n\n elif path.endswith(\".jsonl\") or path.endswith(\".json\"):\n with open(path, \"r\") as f:\n label_words_all = json.load(f)\n # if it is a file containing multiple verbalizers\n if isinstance(label_words_all, list):\n if choice >= len(label_words_all):\n raise RuntimeError(\"choice {} exceed the number of verbalizers {}\"\n .format(choice, len(label_words_all)))\n label_words = label_words_all[choice]\n elif isinstance(label_words_all, dict):\n label_words = label_words_all\n if choice>0:\n print(\"Choice of verbalizer is 1, but the file \\\n only contains one verbalizer.\")\n\n self.label_words = label_words\n if self.num_classes is not None:\n num_classes = len(self.label_words)\n assert num_classes==self.num_classes, 'number of classes in the verbalizer file\\\n does not match the predefined num_classes.'\n return self" } ]
from pickle import FALSE
from torch.utils.data.sampler import RandomSampler
from transformers.configuration_utils import PretrainedConfig
from transformers.generation_utils import GenerationMixin
from torch.utils.data import Dataset
from typing import *
from .data_utils import InputExample, InputFeatures
from torch.utils.data._utils.collate import default_collate
from tqdm.std import tqdm
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils.dummy_pt_objects import PreTrainedModel
from .utils import TokenizerWrapper
from .prompt_base import Template, Verbalizer
from collections import defaultdict
from collections import namedtuple
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
import inspect
import numpy as np
11450
r"""Get the function f 's input arguments. A useful gadget when some function slot might be instantiated into multiple functions. Args: f (:obj:`function`) : the function to get the input arguments. Returns: namedtuple : of args, default, varargs, keywords, respectively.s """ sig = inspect.signature(f) args = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD ] varargs = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_POSITIONAL ] varargs = varargs[0] if varargs else None keywords = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_KEYWORD ] keywords = keywords[0] if keywords else None defaults = [ p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty ] or None argspec = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords']) return argspec(args, defaults, varargs, keywords) class PromptDataLoader(object): r""" PromptDataLoader wraps the original dataset. The input data is firstly wrapped with the prompt's template, and then is tokenized by a wrapperd-tokenizer. Args: dataset (:obj:`Dataset` or :obj:`List`): Either a DatasetObject or a list containing the input examples. template (:obj:`Template`): A derived class of :obj:`Template` tokenizer (:obj:`PretrainedTokenizer`): The pretrained tokenizer. tokenizer_wrapper_class (:cls:`TokenizerWrapper`): The class of tokenizer wrapper. max_seq_length (:obj:`int`, optional): The max sequence length of the input ids. It's used to truncate sentences. batch_size (:obj:`int`, optional): The batch_size of data loader teacher_forcing (:obj:`bool`, optional): Whether to fill the mask with target text. Set to true in training generation model. decoder_max_length (:obj:`int`, optional): the decoder maximum length of an encoder-decoder model. predict_eos_token (:obj:`bool`, optional): Whether to predict the <eos> token. Suggest to set to true in generation. truncate_method (:obj:`bool`, optional): the truncate method to use. select from `head`, `tail`, `balanced`. kwargs :Other kwargs that might be passed into a tokenizer wrapper. """ def __init__(self, dataset: Union[Dataset, List], template: Template, tokenizer_wrapper: Optional[TokenizerWrapper] = None, tokenizer: PreTrainedTokenizer = None, tokenizer_wrapper_class = None, verbalizer: Optional[Verbalizer] = None, max_seq_length: Optional[str] = 512, batch_size: Optional[int] = 1, shuffle: Optional[bool] = False, teacher_forcing: Optional[bool] = False, decoder_max_length: Optional[int] = -1, predict_eos_token: Optional[bool] = False, truncate_method: Optional[str] = "tail", drop_last: Optional[bool] = False, **kwargs, ): assert hasattr(dataset, "__iter__"), f"The dataset must have __iter__ method. dataset is {dataset}" assert hasattr(dataset, "__len__"), f"The dataset must have __len__ method. 
dataset is {dataset}" self.raw_dataset = dataset self.wrapped_dataset = [] self.tensor_dataset = [] self.template = template self.verbalizer = verbalizer self.batch_size = batch_size self.shuffle = shuffle self.teacher_forcing = teacher_forcing if tokenizer_wrapper is None: if tokenizer_wrapper_class is None: raise RuntimeError("Either wrapped_tokenizer or tokenizer_wrapper_class should be specified.") if tokenizer is None: raise RuntimeError("No tokenizer specified to instantiate tokenizer_wrapper.") tokenizer_wrapper_init_keys = signature(tokenizer_wrapper_class.__init__).args prepare_kwargs = { "max_seq_length" : max_seq_length, "truncate_method" : truncate_method, "decoder_max_length" : decoder_max_length, "predict_eos_token" : predict_eos_token, "tokenizer" : tokenizer, **kwargs, } to_pass_kwargs = {key: prepare_kwargs[key] for key in prepare_kwargs if key in tokenizer_wrapper_init_keys} self.tokenizer_wrapper = tokenizer_wrapper_class(**to_pass_kwargs) else: self.tokenizer_wrapper = tokenizer_wrapper # check the satisfiability of each component assert hasattr(self.template, 'wrap_one_example'), "Your prompt has no function variable \ named wrap_one_example" # process self.wrap() self.tokenize() if self.shuffle: sampler = RandomSampler(self.tensor_dataset) else: sampler = None self.dataloader = DataLoader( self.tensor_dataset, batch_size = self.batch_size, sampler= sampler,
def signature(f): r"""Get the function f 's input arguments. A useful gadget when some function slot might be instantiated into multiple functions. Args: f (:obj:`function`) : the function to get the input arguments. Returns: namedtuple : of args, default, varargs, keywords, respectively.s """ sig = inspect.signature(f) args = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD ] varargs = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_POSITIONAL ] varargs = varargs[0] if varargs else None keywords = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_KEYWORD ] keywords = keywords[0] if keywords else None defaults = [ p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty ] or None argspec = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords']) return argspec(args, defaults, varargs, keywords) class PromptDataLoader(object): r""" PromptDataLoader wraps the original dataset. The input data is firstly wrapped with the prompt's template, and then is tokenized by a wrapperd-tokenizer. Args: dataset (:obj:`Dataset` or :obj:`List`): Either a DatasetObject or a list containing the input examples. template (:obj:`Template`): A derived class of :obj:`Template` tokenizer (:obj:`PretrainedTokenizer`): The pretrained tokenizer. tokenizer_wrapper_class (:cls:`TokenizerWrapper`): The class of tokenizer wrapper. max_seq_length (:obj:`int`, optional): The max sequence length of the input ids. It's used to truncate sentences. batch_size (:obj:`int`, optional): The batch_size of data loader teacher_forcing (:obj:`bool`, optional): Whether to fill the mask with target text. Set to true in training generation model. decoder_max_length (:obj:`int`, optional): the decoder maximum length of an encoder-decoder model. predict_eos_token (:obj:`bool`, optional): Whether to predict the <eos> token. Suggest to set to true in generation. truncate_method (:obj:`bool`, optional): the truncate method to use. select from `head`, `tail`, `balanced`. kwargs :Other kwargs that might be passed into a tokenizer wrapper. """ def __init__(self, dataset: Union[Dataset, List], template: Template, tokenizer_wrapper: Optional[TokenizerWrapper] = None, tokenizer: PreTrainedTokenizer = None, tokenizer_wrapper_class = None, verbalizer: Optional[Verbalizer] = None, max_seq_length: Optional[str] = 512, batch_size: Optional[int] = 1, shuffle: Optional[bool] = False, teacher_forcing: Optional[bool] = False, decoder_max_length: Optional[int] = -1, predict_eos_token: Optional[bool] = False, truncate_method: Optional[str] = "tail", drop_last: Optional[bool] = False, **kwargs, ): assert hasattr(dataset, "__iter__"), f"The dataset must have __iter__ method. dataset is {dataset}" assert hasattr(dataset, "__len__"), f"The dataset must have __len__ method. 
dataset is {dataset}" self.raw_dataset = dataset self.wrapped_dataset = [] self.tensor_dataset = [] self.template = template self.verbalizer = verbalizer self.batch_size = batch_size self.shuffle = shuffle self.teacher_forcing = teacher_forcing if tokenizer_wrapper is None: if tokenizer_wrapper_class is None: raise RuntimeError("Either wrapped_tokenizer or tokenizer_wrapper_class should be specified.") if tokenizer is None: raise RuntimeError("No tokenizer specified to instantiate tokenizer_wrapper.") tokenizer_wrapper_init_keys = signature(tokenizer_wrapper_class.__init__).args prepare_kwargs = { "max_seq_length" : max_seq_length, "truncate_method" : truncate_method, "decoder_max_length" : decoder_max_length, "predict_eos_token" : predict_eos_token, "tokenizer" : tokenizer, **kwargs, } to_pass_kwargs = {key: prepare_kwargs[key] for key in prepare_kwargs if key in tokenizer_wrapper_init_keys} self.tokenizer_wrapper = tokenizer_wrapper_class(**to_pass_kwargs) else: self.tokenizer_wrapper = tokenizer_wrapper # check the satisfiability of each component assert hasattr(self.template, 'wrap_one_example'), "Your prompt has no function variable \ named wrap_one_example" # process self.wrap() self.tokenize() if self.shuffle: sampler = RandomSampler(self.tensor_dataset) else: sampler = None self.dataloader = DataLoader( self.tensor_dataset, batch_size = self.batch_size, sampler= sampler,
collate_fn = InputFeatures.collate_fct,
1
2023-11-01 08:52:36+00:00
16k
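In the PromptDataLoader.__init__ shown above, only the keyword arguments that the chosen tokenizer-wrapper class actually accepts are forwarded to it, using the signature() helper to inspect its __init__. A self-contained sketch of that filtering pattern (with a hypothetical DummyWrapper standing in for a real TokenizerWrapper subclass) is:

import inspect

class DummyWrapper:
    # Hypothetical stand-in for a TokenizerWrapper subclass.
    def __init__(self, tokenizer=None, max_seq_length=512, truncate_method="tail"):
        self.tokenizer = tokenizer
        self.max_seq_length = max_seq_length
        self.truncate_method = truncate_method

# Collect the keyword-argument names DummyWrapper.__init__ accepts.
init_keys = [
    p.name
    for p in inspect.signature(DummyWrapper.__init__).parameters.values()
    if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]

prepare_kwargs = {
    "max_seq_length": 256,
    "truncate_method": "head",
    "decoder_max_length": -1,    # dropped: DummyWrapper does not accept it
    "predict_eos_token": False,  # dropped as well
    "tokenizer": None,
}
to_pass_kwargs = {k: v for k, v in prepare_kwargs.items() if k in init_keys}
wrapper = DummyWrapper(**to_pass_kwargs)
print(sorted(to_pass_kwargs))  # ['max_seq_length', 'tokenizer', 'truncate_method']

The collate function for the resulting DataLoader is InputFeatures.collate_fct (the target completion of this record), which batches the tensorable keys with default_collate and keeps encoded_tgt_text as a plain list.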
choderalab/chiron
Examples/LJ_mcmove.py
[ { "identifier": "LJPotential", "path": "chiron/potential.py", "snippet": "class LJPotential(NeuralNetworkPotential):\n def __init__(\n self,\n topology: Topology,\n sigma: unit.Quantity = 3.350 * unit.angstroms,\n epsilon: unit.Quantity = 1.0 * unit.kilocalories_per_mole,\n cutoff: unit.Quantity = unit.Quantity(1.0, unit.nanometer),\n ):\n \"\"\"\n Initialize the Lennard-Jones potential.\n\n Parameters\n ----------\n topology : Topology\n The topology of the system\n sigma : unit.Quantity, optional\n The distance at which the potential is zero, by default 3.350 * unit.angstroms\n epsilon : unit.Quantity, optional\n The depth of the potential well, by default 1.0 * unit.kilocalories_per_mole\n cutoff : unit.Quantity, optional\n The cutoff distance for the potential, by default 1.0 * unit.nanometer\n\n \"\"\"\n\n if not isinstance(topology, Topology):\n if not isinstance(topology, property):\n if topology is not None:\n raise TypeError(\n f\"Topology must be a Topology object or None, type(topology) = {type(topology)}\"\n )\n if not isinstance(sigma, unit.Quantity):\n raise TypeError(\n f\"sigma must be a unit.Quantity, type(sigma) = {type(sigma)}\"\n )\n if not isinstance(epsilon, unit.Quantity):\n raise TypeError(\n f\"epsilon must be a unit.Quantity, type(epsilon) = {type(epsilon)}\"\n )\n if not isinstance(cutoff, unit.Quantity):\n raise TypeError(\n f\"cutoff must be a unit.Quantity, type(cutoff) = {type(cutoff)}\"\n )\n\n if not sigma.unit.is_compatible(unit.angstrom):\n raise ValueError(f\"sigma must have units of distance, got {sigma.unit}\")\n if not epsilon.unit.is_compatible(unit.kilocalories_per_mole):\n raise ValueError(f\"epsilon must have units of energy, got {epsilon.unit}\")\n if not cutoff.unit.is_compatible(unit.nanometer):\n raise ValueError(f\"cutoff must have units of distance, got {cutoff.unit}\")\n\n self.sigma = sigma.value_in_unit_system(\n unit.md_unit_system\n ) # The distance at which the potential is zero\n self.epsilon = epsilon.value_in_unit_system(\n unit.md_unit_system\n ) # The depth of the potential well\n # The cutoff for a potential is often linked with the parameters and isn't really\n # something I think we should be changing dynamically.\n self.cutoff = cutoff.value_in_unit_system(unit.md_unit_system)\n self.topology = topology\n\n from functools import partial\n\n @partial(jax.jit, static_argnums=(0,))\n def _compute_energy_masked(self, distance, mask):\n \"\"\"\n Compute the LJ energy based on an array representing the distances between a given particle and its neighbors.\n Since the distance array is padded to a fixed length, we need to mask out the padded values before summing the energy.\n\n Parameters\n ----------\n distance : jnp.array\n The distances between a given particle and its neighbors\n mask : jnp.array\n An array indicating which values in the distance array are valid and which are padded [1.0 or 0.0]\n \"\"\"\n\n # we can just multiply by the mask rather than using jnp.where to mask.\n energy = mask * (\n 4\n * self.epsilon\n * ((self.sigma / distance) ** 12 - (self.sigma / distance) ** 6)\n )\n return energy.sum()\n\n def compute_energy(self, positions: jnp.array, nbr_list=None, debug_mode=False):\n \"\"\"\n Compute the LJ energy.\n\n Parameters\n ----------\n positions : jnp.array\n The positions of the particles in the system\n nbr_list : NeighborList, default=None\n Instance of a neighbor list or pair list class to use.\n If None, an unoptimized N^2 pairlist will be used without PBC conditions.\n Returns\n -------\n 
potential_energy : float\n The total potential energy of the system.\n\n \"\"\"\n # Compute the pair distances and displacement vectors\n\n if nbr_list is None:\n log.debug(\n \"nbr_list is None, computing using inefficient N^2 pairlist without PBC.\"\n )\n # Compute the pairlist for a given set of positions and a cutoff distance\n # Note in this case, we do not need the pairs or displacement vectors\n # Since we already calculate the distance in the pairlist computation\n # Pairs and displacement vectors are needed for an analytical evaluation of the force\n # which we will do as part of testing\n distances, displacement_vectors, pairs = self.compute_pairlist(\n positions, self.cutoff\n )\n # if our pairlist is empty, the particles are non-interacting and\n # the energy will be 0\n if distances.shape[0] == 0:\n return 0.0\n\n potential_energy = (\n 4\n * self.epsilon\n * ((self.sigma / distances) ** 12 - (self.sigma / distances) ** 6)\n )\n # sum over all pairs to get the total potential energy\n return potential_energy.sum()\n\n else:\n # ensure the neighborlist has been constructed before trying to use it\n\n if not nbr_list.is_built:\n raise ValueError(\"Neighborlist must be built before use\")\n\n # ensure that the cutoff in the neighbor list is the same as the cutoff in the potential\n if nbr_list.cutoff != self.cutoff:\n raise ValueError(\n f\"Neighborlist cutoff ({nbr_list.cutoff}) must be the same as the potential cutoff ({self.cutoff})\"\n )\n\n n_neighbors, pairs, mask, dist, displacement_vectors = nbr_list.calculate(\n positions\n )\n\n potential_energy = jax.vmap(self._compute_energy_masked, in_axes=(0))(\n dist, mask.astype(jnp.float32)\n )\n return potential_energy.sum()\n\n def compute_force(self, positions: jnp.array, nbr_list=None) -> jnp.array:\n \"\"\"\n Compute the LJ force using the negative of jax.grad.\n\n Parameters\n ----------\n positions : jnp.array\n The positions of the particles in the system\n nbr_list : NeighborList, optional\n Instance of the neighborlist class to use. 
By default, set to None, which will use an N^2 pairlist\n\n Returns\n -------\n force : jnp.array\n The forces on the particles in the system\n\n \"\"\"\n # force = -jax.grad(self.compute_energy)(positions, nbr_list)\n # return force\n return super().compute_force(positions, nbr_list=nbr_list)\n\n def compute_force_analytical(\n self,\n positions: jnp.array,\n ) -> jnp.array:\n \"\"\"\n Compute the LJ force using the analytical expression for testing purposes.\n\n Parameters\n ----------\n positions : jnp.array\n The positions of the particles in the system\n\n Returns\n -------\n force : jnp.array\n The forces on the particles in the system\n\n \"\"\"\n dist, displacement_vector, pairs = self.compute_pairlist(positions, self.cutoff)\n\n forces = (\n 24\n * (self.epsilon / (dist * dist))\n * (2 * (self.sigma / dist) ** 12 - (self.sigma / dist) ** 6)\n ).reshape(-1, 1) * displacement_vector\n\n force_array = jnp.zeros((positions.shape[0], 3))\n for force, p1, p2 in zip(forces, pairs[0], pairs[1]):\n force_array = force_array.at[p1].add(force)\n force_array = force_array.at[p2].add(-force)\n return force_array" }, { "identifier": "SamplerState", "path": "chiron/states.py", "snippet": "class SamplerState:\n \"\"\"\n Represents the state of the system that is updated during integration.\n\n Parameters\n ----------\n x0 : unit.Quantity\n The current positions of the particles in the simulation.\n velocities : unit.Quantity, optional\n The velocities of the particles in the simulation.\n box_vectors : unit.Quantity, optional\n The box vectors defining the simulation's periodic boundary conditions.\n\n \"\"\"\n\n def __init__(\n self,\n x0: unit.Quantity,\n velocities: Optional[unit.Quantity] = None,\n box_vectors: Optional[unit.Quantity] = None,\n ) -> None:\n # NOTE: all units are internally in the openMM units system as documented here:\n # http://docs.openmm.org/latest/userguide/theory/01_introduction.html#units\n if not isinstance(x0, unit.Quantity):\n raise TypeError(f\"x0 must be a unit.Quantity, got {type(x0)} instead.\")\n if velocities is not None and not isinstance(velocities, unit.Quantity):\n raise TypeError(\n f\"velocities must be a unit.Quantity, got {type(velocities)} instead.\"\n )\n if box_vectors is not None and not isinstance(box_vectors, unit.Quantity):\n if isinstance(box_vectors, List):\n try:\n box_vectors = self._convert_from_openmm_box(box_vectors)\n except:\n raise TypeError(f\"Unable to parse box_vectors {box_vectors}.\")\n else:\n raise TypeError(\n f\"box_vectors must be a unit.Quantity or openMM box, got {type(box_vectors)} instead.\"\n )\n if not x0.unit.is_compatible(unit.nanometer):\n raise ValueError(f\"x0 must have units of distance, got {x0.unit} instead.\")\n if velocities is not None and not velocities.unit.is_compatible(\n unit.nanometer / unit.picosecond\n ):\n raise ValueError(\n f\"velocities must have units of distance/time, got {velocities.unit} instead.\"\n )\n if box_vectors is not None and not box_vectors.unit.is_compatible(\n unit.nanometer\n ):\n raise ValueError(\n f\"box_vectors must have units of distance, got {box_vectors.unit} instead.\"\n )\n if box_vectors is not None and box_vectors.shape != (3, 3):\n raise ValueError(\n f\"box_vectors must be a 3x3 array, got {box_vectors.shape} instead.\"\n )\n\n self._x0 = x0\n self._velocities = velocities\n self._box_vectors = box_vectors\n self._distance_unit = unit.nanometer\n\n @property\n def x0(self) -> jnp.array:\n return self._convert_to_jnp(self._x0)\n\n @property\n def velocities(self) -> 
jnp.array:\n if self._velocities is None:\n return None\n return self._convert_to_jnp(self._velocities)\n\n @property\n def box_vectors(self) -> jnp.array:\n if self._box_vectors is None:\n return None\n return self._convert_to_jnp(self._box_vectors)\n\n @x0.setter\n def x0(self, x0: Union[jnp.array, unit.Quantity]) -> None:\n if isinstance(x0, unit.Quantity):\n self._x0 = x0\n else:\n self._x0 = unit.Quantity(x0, self._distance_unit)\n\n @property\n def distance_unit(self) -> unit.Unit:\n return self._distance_unit\n\n def _convert_to_jnp(self, array: unit.Quantity) -> jnp.array:\n \"\"\"\n Convert the sampler state to jnp arrays.\n \"\"\"\n import jax.numpy as jnp\n\n array_ = array.value_in_unit_system(unit.md_unit_system)\n return jnp.array(array_)\n\n def _convert_from_openmm_box(self, openmm_box_vectors: List) -> unit.Quantity:\n box_vec = []\n for i in range(0, 3):\n layer = []\n for j in range(0, 3):\n layer.append(\n openmm_box_vectors[i][j].value_in_unit(openmm_box_vectors[0].unit)\n )\n box_vec.append(layer)\n return unit.Quantity(jnp.array(box_vec), openmm_box_vectors[0].unit)" }, { "identifier": "ThermodynamicState", "path": "chiron/states.py", "snippet": "class ThermodynamicState:\n \"\"\"\n Represents the thermodynamic state of the system.\n\n Parameters\n ----------\n potential : NeuralNetworkPotential\n The potential energy function of the system.\n temperature : unit.Quantity, optional\n The temperature of the simulation.\n volume : unit.Quantity, optional\n The volume of the simulation.\n pressure : unit.Quantity, optional\n The pressure of the simulation.\n\n \"\"\"\n\n def __init__(\n self,\n potential: Optional[NeuralNetworkPotential],\n temperature: Optional[unit.Quantity] = None,\n volume: Optional[unit.Quantity] = None,\n pressure: Optional[unit.Quantity] = None,\n ):\n self.potential = potential\n\n if temperature is not None and not isinstance(temperature, unit.Quantity):\n raise TypeError(\n f\"temperature must be a unit.Quantity, got {type(temperature)} instead.\"\n )\n elif temperature is not None:\n if not temperature.unit.is_compatible(unit.kelvin):\n raise ValueError(\n f\"temperature must have units of temperature, got {temperature.unit} instead.\"\n )\n\n if volume is not None and not isinstance(volume, unit.Quantity):\n raise TypeError(\n f\"volume must be a unit.Quantity, got {type(volume)} instead.\"\n )\n elif volume is not None:\n if not volume.unit.is_compatible(unit.nanometer**3):\n raise ValueError(\n f\"volume must have units of distance**3, got {volume.unit} instead.\"\n )\n if pressure is not None and not isinstance(pressure, unit.Quantity):\n raise TypeError(\n f\"pressure must be a unit.Quantity, got {type(pressure)} instead.\"\n )\n elif pressure is not None:\n if not pressure.unit.is_compatible(unit.atmosphere):\n raise ValueError(\n f\"pressure must have units of pressure, got {pressure.unit} instead.\"\n )\n\n self.temperature = temperature\n if temperature is not None:\n self.beta = 1.0 / (unit.BOLTZMANN_CONSTANT_kB * (self.temperature))\n else:\n self.beta = None\n\n self.volume = volume\n self.pressure = pressure\n\n from .utils import get_nr_of_particles\n\n self.nr_of_particles = get_nr_of_particles(self.potential.topology)\n self._check_completness()\n\n def check_variables(self) -> None:\n \"\"\"\n Check if all necessary variables are set and log the simulation ensemble.\n \"\"\"\n variables = [\n \"temperature\",\n \"volume\",\n \"pressure\",\n ]\n set_variables = [var for var in variables if getattr(self, var) is not None]\n 
return set_variables\n\n def _check_completness(self):\n # check which variables are set\n set_variables = self.check_variables()\n\n if len(set_variables) == 0:\n log.info(\"No variables are set.\")\n\n # print all set variables\n for var in set_variables:\n log.info(f\"{var} is set.\")\n\n if self.temperature and self.volume and self.nr_of_particles:\n log.info(\"NVT ensemble simulated.\")\n if self.temperature and self.pressure and self.nr_of_particles:\n log.info(\"NpT ensemble is simulated.\")\n\n @classmethod\n def are_states_compatible(cls, state1, state2):\n \"\"\"\n Check if two simulation states are compatible.\n\n This method should define the criteria for compatibility,\n such as matching number of particles, etc.\n\n Parameters\n ----------\n state1 : SimulationState\n The first simulation state to compare.\n state2 : SimulationState\n The second simulation state to compare.\n\n Returns\n -------\n bool\n True if states are compatible, False otherwise.\n \"\"\"\n pass\n\n def get_reduced_potential(\n self, sampler_state: SamplerState, nbr_list=None\n ) -> float:\n \"\"\"\n Compute the reduced potential for the given sampler state.\n\n Parameters\n ----------\n sampler_state : SamplerState\n The sampler state for which to compute the reduced potential.\n nbr_list : NeighborList or PairList, optional\n The neighbor list or pair list routine to use for calculating the reduced potential.\n\n Returns\n -------\n float\n The reduced potential of the system.\n\n Notes\n -----\n The reduced potential is computed as:\n u = \\beta [U(x) + p V(x) + \\mu N(x)],\n where \\beta is the inverse temperature, p is the pressure,\n \\mu is the chemical potential, x are the atomic positions,\n U(x) is the potential energy, V(x) is the box volume,\n and N(x) is the number of particles.\n \"\"\"\n if self.beta is None:\n self.beta = 1.0 / (\n unit.BOLTZMANN_CONSTANT_kB * (self.temperature * unit.kelvin)\n )\n log.debug(f\"sample state: {sampler_state.x0}\")\n reduced_potential = (\n unit.Quantity(\n self.potential.compute_energy(sampler_state.x0, nbr_list),\n unit.kilojoule_per_mole,\n )\n ) / unit.AVOGADRO_CONSTANT_NA\n log.debug(f\"reduced potential: {reduced_potential}\")\n if self.pressure is not None:\n reduced_potential += self.pressure * self.volume\n\n return self.beta * reduced_potential\n\n def kT_to_kJ_per_mol(self, energy):\n energy = energy * unit.AVOGADRO_CONSTANT_NA\n return energy / self.beta" }, { "identifier": "NeighborListNsqrd", "path": "chiron/neighbors.py", "snippet": "class NeighborListNsqrd(PairsBase):\n \"\"\"\n N^2 neighborlist implementation that returns the particle pair ids, displacement vectors, and distances.\n\n Parameters\n ----------\n space: Space\n Class that defines how to calculate the displacement between two points and apply the boundary conditions\n cutoff: float, default = 2.5\n Cutoff distance for the neighborlist\n skin: float, default = 0.4\n Skin distance for the neighborlist\n n_max_neighbors: int, default=200\n Maximum number of neighbors for each particle. 
Used for padding arrays for efficient jax computations\n This will be checked and dynamically updated during the build stage\n Examples\n --------\n\n\n \"\"\"\n\n def __init__(\n self,\n space: Space,\n cutoff: unit.Quantity = unit.Quantity(1.2, unit.nanometer),\n skin: unit.Quantity = unit.Quantity(0.4, unit.nanometer),\n n_max_neighbors: float = 200,\n ):\n if not isinstance(space, Space):\n raise TypeError(f\"space must be of type Space, found {type(space)}\")\n if not cutoff.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, cutoff.unit = {cutoff.unit}\"\n )\n if not skin.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, skin.unit = {skin.unit}\"\n )\n\n self.cutoff = cutoff.value_in_unit_system(unit.md_unit_system)\n self.skin = skin.value_in_unit_system(unit.md_unit_system)\n self.cutoff_and_skin = self.cutoff + self.skin\n self.n_max_neighbors = n_max_neighbors\n self.space = space\n\n # set a a simple variable to know if this has at least been built once as opposed to just initialized\n # this does not imply that the neighborlist is up to date\n self.is_built = False\n\n # note, we need to use the partial decorator in order to use the jit decorate\n # so that it knows to ignore the `self` argument\n @partial(jax.jit, static_argnums=(0,))\n def _pairs_mask(self, particle_ids: jnp.array):\n \"\"\"\n Jitted function to generate mask that allows us to remove self-interactions and double-counting of pairs\n\n Parameters\n ----------\n particle_ids: jnp.array\n Array of particle ids\n\n Returns\n -------\n jnp.array\n Bool mask to remove self-interactions and double-counting of pairs\n\n \"\"\"\n # for the nsq approach, we consider the distance between a particle and all other particles in the system\n # if we used a cell list the possible_neighbors would be a smaller list, i.e., only those in the neigboring cells\n\n possible_neighbors = particle_ids\n\n particles_j = jnp.broadcast_to(\n possible_neighbors,\n (particle_ids.shape[0], possible_neighbors.shape[0]),\n )\n\n # reshape the particle_ids\n particles_i = jnp.reshape(particle_ids, (particle_ids.shape[0], 1))\n # create a mask to exclude self interactions and double counting\n temp_mask = particles_i < particles_j\n\n return temp_mask\n\n @partial(jax.jit, static_argnums=(0, 5))\n def _build_neighborlist(\n self, particle_i, reduction_mask, pid, coordinates, n_max_neighbors\n ):\n \"\"\"\n Jitted function to build the neighbor list for a single particle\n\n Parameters\n ----------\n particle_i: jnp.array\n X,Y,Z coordinates of particle i\n reduction_mask: jnp.array\n Mask to exclude self-interactions and double counting of pairs\n coordinates: jnp.array\n X,Y,Z coordinates of all particles\n n_max_neighbors: int\n Maximum number of neighbors for each particle. 
Used for padding arrays for efficient jax computations\n\n Returns\n -------\n neighbor_list_mask: jnp.array\n Mask to exclude padding from the neighbor list\n neighbor_list: jnp.array\n List of particle ids for the neighbors, padded to n_max_neighbors\n n_neighbors: int\n Number of neighbors for the particle\n \"\"\"\n\n # calculate the displacement between particle i and all other particles\n r_ij, dist = self.space.displacement(particle_i, coordinates)\n\n # neighbor_mask will be an array of length n_particles (i.e., length of coordinates)\n # where each element is True if the particle is a neighbor, False if it is not\n # subject to both the cutoff+skin and the reduction mask that eliminates double counting and self-interactions\n neighbor_mask = jnp.where(\n (dist < self.cutoff_and_skin) & (reduction_mask), True, False\n )\n # when we pad the neighbor list, we will use last particle id in the neighbor list\n # this choice was made such that when we use the neighbor list in the masked energy calculat\n # the padded values will result in reasonably well defined values\n fill_value = jnp.argmax(neighbor_mask)\n fill_value = jnp.where(fill_value == pid, fill_value + 1, fill_value)\n\n # count up the number of neighbors\n n_neighbors = jnp.where(neighbor_mask, 1, 0).sum()\n\n # since neighbor_mask indices have a one-to-one correspondence to particle ids,\n # applying jnp.where, will return an array of the indices that are neighbors.\n # since this needs to be uniformly sized, we can just fill this array up to the n_max_neighbors.\n neighbor_list = jnp.array(\n jnp.where(neighbor_mask, size=n_max_neighbors, fill_value=fill_value),\n dtype=jnp.uint32,\n )\n # we need to generate a new mask associatd with the padded neighbor list\n # to be able to quickly exclude the padded values from the neighbor list\n neighbor_list_mask = jnp.where(jnp.arange(n_max_neighbors) < n_neighbors, 1, 0)\n\n del r_ij, dist\n return neighbor_list_mask, neighbor_list, n_neighbors\n\n def build(\n self,\n coordinates: Union[jnp.array, unit.Quantity],\n box_vectors: Union[jnp.array, unit.Quantity],\n ):\n \"\"\"\n Build the neighborlist from an array of coordinates and box vectors.\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[N,3] array of particle coordinates\n box_vectors: jnp.array\n Shape[3,3] array of box vectors\n\n Returns\n -------\n None\n\n \"\"\"\n\n # set our reference coordinates\n # the call to x0 and box_vectors automatically convert these to jnp arrays in the correct unit system\n if isinstance(coordinates, unit.Quantity):\n if not coordinates.unit.is_compatible(unit.nanometer):\n raise ValueError(\n f\"Coordinates require distance units, not {coordinates.unit}\"\n )\n coordinates = coordinates.value_in_unit_system(unit.md_unit_system)\n\n if isinstance(box_vectors, unit.Quantity):\n if not box_vectors.unit.is_compatible(unit.nanometer):\n raise ValueError(\n f\"Box vectors require distance unit, not {box_vectors.unit}\"\n )\n box_vectors = box_vectors.value_in_unit_system(unit.md_unit_system)\n\n if box_vectors.shape != (3, 3):\n raise ValueError(\n f\"box_vectors should be a 3x3 array, shape provided: {box_vectors.shape}\"\n )\n\n self.ref_coordinates = coordinates\n self.box_vectors = box_vectors\n\n # the neighborlist assumes that the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n self.space.box_vectors = self.box_vectors\n\n # store the ids of all the particles\n self.particle_ids = 
jnp.array(\n range(0, self.ref_coordinates.shape[0]), dtype=jnp.uint32\n )\n\n # calculate which pairs to exclude\n reduction_mask = self._pairs_mask(self.particle_ids)\n\n # calculate the distance for all pairs this will return\n # neighbor_mask: an array of shape (n_particles, n_particles) where each element is the mask\n # to determine if the particle is a neighbor\n # neighbor_list: an array of shape (n_particles, n_max_neighbors) where each element is the particle id of the neighbor\n # this is padded with zeros to ensure a uniform size;\n # n_neighbors: an array of shape (n_particles) where each element is the number of neighbors for that particle\n\n self.neighbor_mask, self.neighbor_list, self.n_neighbors = jax.vmap(\n self._build_neighborlist, in_axes=(0, 0, 0, None, None)\n )(\n self.ref_coordinates,\n reduction_mask,\n self.particle_ids,\n self.ref_coordinates,\n self.n_max_neighbors,\n )\n\n self.neighbor_list = self.neighbor_list.reshape(-1, self.n_max_neighbors)\n\n while jnp.any(self.n_neighbors == self.n_max_neighbors).block_until_ready():\n log.debug(\n f\"Increasing n_max_neighbors from {self.n_max_neighbors} to at {jnp.max(self.n_neighbors)+10}\"\n )\n self.n_max_neighbors = int(jnp.max(self.n_neighbors) + 10)\n\n self.neighbor_mask, self.neighbor_list, self.n_neighbors = jax.vmap(\n self._build_neighborlist, in_axes=(0, 0, 0, None, None)\n )(\n self.ref_coordinates,\n reduction_mask,\n self.particle_ids,\n self.ref_coordinates,\n self.n_max_neighbors,\n )\n\n self.neighbor_list = self.neighbor_list.reshape(-1, self.n_max_neighbors)\n\n self.is_built = True\n\n @partial(jax.jit, static_argnums=(0,))\n def _calc_distance_per_particle(\n self, particle1, neighbors, neighbor_mask, coordinates\n ):\n \"\"\"\n Jitted function to calculate the distance between a particle and its neighbors\n\n Parameters\n ----------\n particle1: int\n Particle id\n neighbors: jnp.array\n Array of particle ids for the neighbors of particle1\n neighbor_mask: jnp.array\n Mask to exclude padding from the neighbor list of particle1\n coordinates: jnp.array\n X,Y,Z coordinates of all particles\n\n Returns\n -------\n n_pairs: int\n Number of interacting pairs for the particle\n mask: jnp.array\n Mask to exclude padding from the neighbor list of particle1.\n If a particle is within the interaction cutoff, the mask is 1, otherwise it is 0\n dist: jnp.array\n Array of distances between the particle and its neighbors\n r_ij: jnp.array\n Array of displacement vectors between the particle and its neighbors\n \"\"\"\n # repeat the particle id for each neighbor\n particles1 = jnp.repeat(particle1, neighbors.shape[0])\n\n # calculate the displacement between particle i and all neighbors\n r_ij, dist = self.space.displacement(\n coordinates[particles1], coordinates[neighbors]\n )\n # calculate the mask to determine if the particle is a neighbor\n # this will be done based on the interaction cutoff and using the neighbor_mask to exclude padding\n mask = jnp.where((dist < self.cutoff) & (neighbor_mask), 1, 0)\n\n # calculate the number of pairs\n n_pairs = mask.sum()\n\n return n_pairs, mask, dist, r_ij\n\n def calculate(self, coordinates: jnp.array):\n \"\"\"\n Calculate the neighbor list for the current state\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[N,3] array of particle coordinates\n\n Returns\n -------\n n_neighbors: jnp.array\n Array of number of neighbors for each particle\n neighbor_list: jnp.array\n Array of particle ids for the neighbors, padded to n_max_neighbors. 
Shape (n_particles, n_max_neighbors)\n padding_mask: jnp.array\n Array of masks to exclude padding from the neighbor list of each particle. Shape (n_particles, n_max_neighbors)\n dist: jnp.array\n Array of distances between each particle and its neighbors. Shape (n_particles, n_max_neighbors)\n r_ij: jnp.array\n Array of displacement vectors between each particle and its neighbors. Shape (n_particles, n_max_neighbors, 3)\n \"\"\"\n # coordinates = sampler_state.x0\n # note, we assume the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n\n n_neighbors, padding_mask, dist, r_ij = jax.vmap(\n self._calc_distance_per_particle, in_axes=(0, 0, 0, None)\n )(self.particle_ids, self.neighbor_list, self.neighbor_mask, coordinates)\n # mask = mask.reshape(-1, self.n_max_neighbors)\n return n_neighbors, self.neighbor_list, padding_mask, dist, r_ij\n\n @partial(jax.jit, static_argnums=(0,))\n def _calculate_particle_displacement(self, particle, coordinates, ref_coordinates):\n \"\"\"\n Calculate the displacement of a particle from the reference coordinates.\n If the displacement exceeds the half the skin distance, return True, otherwise return False.\n\n This function is designed to allow it to be jitted and vmapped over particle indices.\n\n Parameters\n ----------\n particle: int\n Particle id\n coordinates: jnp.array\n Array of particle coordinates\n ref_coordinates: jnp.array\n Array of reference particle coordinates\n\n Returns\n -------\n bool\n True if the particle is outside the skin distance, False if it is not.\n \"\"\"\n # calculate the displacement of a particle from the initial coordinates\n\n r_ij, displacement = self.space.displacement(\n coordinates[particle], ref_coordinates[particle]\n )\n\n status = jnp.where(displacement >= self.skin / 2.0, True, False)\n del displacement\n return status\n\n def check(self, coordinates: jnp.array) -> bool:\n \"\"\"\n Check if the neighbor list needs to be rebuilt based on displacement of the particles from the reference coordinates.\n If a particle moves more than 0.5 skin distance, the neighborlist will be rebuilt.\n Will also return True if the size of the coordinates array changes.\n\n Note, this could also accept a user defined criteria for distance, but this is not implemented yet.\n\n Parameters\n ----------\n coordinates: jnp.array\n Array of particle coordinates\n Returns\n -------\n bool\n True if the neighbor list needs to be rebuilt, False if it does not.\n \"\"\"\n\n if self.ref_coordinates.shape[0] != coordinates.shape[0]:\n return True\n\n status = jax.vmap(\n self._calculate_particle_displacement, in_axes=(0, None, None)\n )(self.particle_ids, coordinates, self.ref_coordinates)\n if jnp.any(status):\n del status\n return True\n else:\n del status\n return False" }, { "identifier": "OrthogonalPeriodicSpace", "path": "chiron/neighbors.py", "snippet": "class OrthogonalPeriodicSpace(Space):\n \"\"\"\n Defines the simulation space for an orthogonal periodic system.\n\n \"\"\"\n\n @property\n def box_vectors(self) -> jnp.array:\n return self._box_vectors\n\n @box_vectors.setter\n def box_vectors(self, box_vectors: jnp.array) -> None:\n self._box_vectors = box_vectors\n self._box_lengths = jnp.array(\n [box_vectors[0][0], box_vectors[1][1], box_vectors[2][2]]\n )\n\n @partial(jax.jit, static_argnums=(0,))\n def displacement(\n self, xyz_1: jnp.array, xyz_2: jnp.array\n ) -> Tuple[jnp.array, jnp.array]:\n \"\"\"\n Calculate the periodic distance 
between two points.\n\n Parameters\n ----------\n xyz_1: jnp.array\n Coordinates of the first point\n xyz_2: jnp.array\n Coordinates of the second point\n\n Returns\n -------\n r_ij: jnp.array\n Displacement vector between the two points\n dist: float\n Distance between the two points\n\n \"\"\"\n # calculate uncorrect r_ij\n r_ij = xyz_1 - xyz_2\n\n # calculated corrected displacement vector\n r_ij = (\n jnp.mod(r_ij + self._box_lengths * 0.5, self._box_lengths)\n - self._box_lengths * 0.5\n )\n # calculate the scalar distance\n dist = jnp.linalg.norm(r_ij, axis=-1)\n\n return r_ij, dist\n\n @partial(jax.jit, static_argnums=(0,))\n def wrap(self, xyz: jnp.array) -> jnp.array:\n \"\"\"\n Wrap the coordinates of the system.\n\n Parameters\n ----------\n xyz: jnp.array\n Coordinates of the system\n\n Returns\n -------\n jnp.array\n Wrapped coordinates of the system\n\n \"\"\"\n xyz = xyz - jnp.floor(xyz / self._box_lengths) * self._box_lengths\n\n return xyz" }, { "identifier": "PairList", "path": "chiron/neighbors.py", "snippet": "class PairList(PairsBase):\n \"\"\"\n N^2 pairlist implementation that returns the particle pair ids, displacement vectors, and distances.\n\n Parameters\n ----------\n space: Space\n Class that defines how to calculate the displacement between two points and apply the boundary conditions\n cutoff: float, default = 2.5\n Cutoff distance for the pair list calculation\n Examples\n --------\n >>> from chiron.neighbors import PairList, OrthogonalPeriodicSpace\n >>> from chiron.states import SamplerState\n >>> import jax.numpy as jnp\n >>>\n >>> space = OrthogonalPeriodicSpace()\n >>> pair_list = PairList(space, cutoff=2.5)\n >>> sampler_state = SamplerState(x0=jnp.array([[0.0, 0.0, 0.0], [2, 0.0, 0.0], [0.0, 2, 0.0]]),\n >>> box_vectors=jnp.array([[10, 0.0, 0.0], [0.0, 10, 0.0], [0.0, 0.0, 10]]))\n >>> pair_list.build_from_state(sampler_state)\n >>>\n >>> # mask and distances are of shape (n_particles, n_particles-1),\n >>> displacement_vectors of shape (n_particles, n_particles-1, 3)\n >>> # mask, is a bool array that is True if the particle is within the cutoff distance, False if it is not\n >>> # n_pairs is of shape (n_particles) and is per row sum of the mask. 
The mask ensure we also do not double count pairs\n >>> n_pairs, mask, distances, displacement_vectors = pair_list.calculate(sampler_state.x0)\n \"\"\"\n\n def __init__(\n self,\n space: Space,\n cutoff: unit.Quantity = unit.Quantity(1.2, unit.nanometer),\n ):\n if not isinstance(space, Space):\n raise TypeError(f\"space must be of type Space, found {type(space)}\")\n if not cutoff.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, cutoff.unit = {cutoff.unit}\"\n )\n\n self.cutoff = cutoff.value_in_unit_system(unit.md_unit_system)\n self.space = space\n\n # set a a simple variable to know if this has at least been built once as opposed to just initialized\n # this does not imply that the neighborlist is up to date\n self.is_built = False\n\n # note, we need to use the partial decorator in order to use the jit decorate\n # so that it knows to ignore the `self` argument\n @partial(jax.jit, static_argnums=(0,))\n def _pairs_and_mask(self, particle_ids: jnp.array):\n \"\"\"\n Jitted function to generate all pairs (excluding self interactions)\n and mask that allows us to remove double-counting of pairs.\n\n Parameters\n ----------\n particle_ids: jnp.array\n Array of particle ids\n\n Returns\n -------\n all_pairs: jnp.array\n Array of all pairs (excluding self interactions), of size (n_particles, n_particles-1)\n reduction_mask: jnp.array\n Bool mask that identifies which pairs to exclude to remove double counting of pairs\n\n \"\"\"\n # for the nsq approach, we consider the distance between a particle and all other particles in the system\n # if we used a cell list the possible_neighbors would be a smaller list, i.e., only those in the neigboring cells\n # we'll just keep with naming syntax for future flexibility\n\n possible_neighbors = particle_ids\n\n particles_j = jnp.broadcast_to(\n possible_neighbors,\n (particle_ids.shape[0], possible_neighbors.shape[0]),\n )\n # reshape the particle_ids\n particles_i = jnp.reshape(particle_ids, (particle_ids.shape[0], 1))\n # create a mask to exclude self interactions and double counting\n temp_mask = particles_i != particles_j\n all_pairs = jax.vmap(self._remove_self_interactions, in_axes=(0, 0))(\n particles_j, temp_mask\n )\n del temp_mask\n all_pairs = jnp.array(all_pairs[0], dtype=jnp.uint32)\n\n reduction_mask = jnp.where(particles_i < all_pairs, True, False)\n\n return all_pairs, reduction_mask\n\n @partial(jax.jit, static_argnums=(0,))\n def _remove_self_interactions(self, particles, temp_mask):\n return jnp.where(\n temp_mask, size=particles.shape[0] - 1, fill_value=particles.shape[0] - 1\n )\n\n def build(\n self,\n coordinates: Union[jnp.array, unit.Quantity],\n box_vectors: Union[jnp.array, unit.Quantity],\n ):\n \"\"\"\n Build the neighborlist from an array of coordinates and box vectors.\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[n_particles,3] array of particle coordinates\n box_vectors: jnp.array\n Shape[3,3] array of box vectors\n\n Returns\n -------\n None\n\n \"\"\"\n\n # set our reference coordinates\n # this will set self.ref_coordinates=coordinates and self.box_vectors\n self._validate_build_inputs(coordinates, box_vectors)\n\n self.n_particles = self.ref_coordinates.shape[0]\n\n # the neighborlist assumes that the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n self.space.box_vectors = self.box_vectors\n\n # store the ids of all the particles\n 
self.particle_ids = jnp.array(range(0, coordinates.shape[0]), dtype=jnp.uint32)\n\n # calculate which pairs to exclude\n self.all_pairs, self.reduction_mask = self._pairs_and_mask(self.particle_ids)\n\n self.is_built = True\n\n @partial(jax.jit, static_argnums=(0,))\n def _calc_distance_per_particle(\n self, particle1, neighbors, neighbor_mask, coordinates\n ):\n \"\"\"\n Jitted function to calculate the distance between a particle and all possible neighbors\n\n Parameters\n ----------\n particle1: int\n Particle id\n neighbors: jnp.array\n Array of particle ids for the possible particle pairs of particle1\n neighbor_mask: jnp.array\n Mask to exclude double particles to prevent double counting\n coordinates: jnp.array\n X,Y,Z coordinates of all particles, shaped (n_particles, 3)\n\n Returns\n -------\n n_pairs: int\n Number of interacting pairs for the particle\n mask: jnp.array\n Mask to exclude padding particles not within the cutoff particle1.\n If a particle is within the interaction cutoff, the mask is 1, otherwise it is 0\n Array has shape (n_particles, n_particles-1) as it excludes self interactions\n dist: jnp.array\n Array of distances between the particle and all other particles in the system.\n Array has shape (n_particles, n_particles-1) as it excludes self interactions\n r_ij: jnp.array\n Array of displacement vectors between the particle and all other particles in the system.\n Array has shape (n_particles, n_particles-1, 3) as it excludes self interactions\n\n \"\"\"\n # repeat the particle id for each neighbor\n particles1 = jnp.repeat(particle1, neighbors.shape[0])\n\n # calculate the displacement between particle i and all neighbors\n r_ij, dist = self.space.displacement(\n coordinates[particles1], coordinates[neighbors]\n )\n # calculate the mask to determine if the particle is a neighbor\n # this will be done based on the interaction cutoff and using the neighbor_mask to exclude padding\n mask = jnp.where((dist < self.cutoff) & (neighbor_mask), 1, 0)\n\n # calculate the number of pairs\n n_pairs = mask.sum()\n\n return n_pairs, mask, dist, r_ij\n\n def calculate(self, coordinates: jnp.array):\n \"\"\"\n Calculate the neighbor list for the current state\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[n_particles,3] array of particle coordinates\n\n Returns\n -------\n n_neighbors: jnp.array\n Array of the number of interacting particles (i.e., where dist < cutoff). Shape: (n_particles)\n pairs: jnp.array\n Array of particle ids that were considered for interaction. Shape: (n_particles, n_particles-1)\n padding_mask: jnp.array\n Array used to masks non interaction particle pairs. Shape: (n_particles, n_particles-1)\n dist: jnp.array\n Array of distances between pairs in the system. Shape: (n_particles, n_particles-1)\n r_ij: jnp.array\n Array of displacement vectors between particle pairs. Shape: (n_particles, n_particles-1, 3).\n \"\"\"\n if coordinates.shape[0] != self.n_particles:\n raise ValueError(\n f\"Number of particles cannot changes without rebuilding. 
\"\n f\"Coordinates must have shape ({self.n_particles}, 3), found {coordinates.shape}\"\n )\n\n # coordinates = self.space.wrap(coordinates)\n\n n_neighbors, padding_mask, dist, r_ij = jax.vmap(\n self._calc_distance_per_particle, in_axes=(0, 0, 0, None)\n )(self.particle_ids, self.all_pairs, self.reduction_mask, coordinates)\n\n return n_neighbors, self.all_pairs, padding_mask, dist, r_ij\n\n def check(self, coordinates: jnp.array) -> bool:\n \"\"\"\n Check if we need to reconstruct internal arrays.\n For a simple pairlist this will always return False, unless the number of particles change.\n\n Parameters\n ----------\n coordinates: jnp.array\n Array of particle coordinates\n Returns\n -------\n bool\n True if we need to rebuild the neighbor list, False if we do not.\n \"\"\"\n if coordinates.shape[0] != self.n_particles:\n return True\n else:\n return False" }, { "identifier": "SimulationReporter", "path": "chiron/reporters.py", "snippet": "class SimulationReporter:\n def __init__(self, filename: str, topology: Topology, buffer_size: int = 1):\n \"\"\"\n Initialize the SimulationReporter.\n\n Parameters\n ----------\n filename : str\n Name of the HDF5 file to write the simulation data.\n topology: openmm.Topology\n buffer_size : int, optional\n Number of data points to buffer before writing to disk (default is 1).\n\n \"\"\"\n import mdtraj as md\n\n self.filename = filename\n self.buffer_size = buffer_size\n self.topology = topology\n self.buffer = {}\n self.h5file = h5py.File(filename, \"a\")\n log.info(f\"Writing simulation data to {filename}\")\n\n def get_available_keys(self):\n return self.h5file.keys()\n\n def report(self, data_dict):\n \"\"\"\n Add new data to the buffer and write the buffer to disk if it's full.\n\n Parameters\n ----------\n data_dict : dict\n Dictionary containing data to report. 
Keys are data labels (e.g., 'energy'),\n and values are the data points (usually numpy arrays).\n\n \"\"\"\n for key, value in data_dict.items():\n if key not in self.buffer:\n self.buffer[key] = []\n self.buffer[key].append(value)\n\n if len(self.buffer[key]) >= self.buffer_size:\n self._write_to_disk(key)\n\n def _write_to_disk(self, key):\n \"\"\"\n Write buffered data of a given key to the HDF5 file.\n\n Parameters\n ----------\n key : str\n The key of the data to write to disk.\n\n \"\"\"\n data = np.array(self.buffer[key])\n if key in self.h5file:\n dset = self.h5file[key]\n dset.resize((dset.shape[0] + data.shape[0],) + data.shape[1:])\n dset[-data.shape[0] :] = data\n else:\n log.debug(f\"Creating {key} in {self.filename}\")\n self.h5file.create_dataset(\n key, data=data, maxshape=(None,) + data.shape[1:], chunks=True\n )\n\n self.buffer[key] = []\n\n def close(self):\n \"\"\"\n Write any remaining data in the buffer to disk and close the HDF5 file.\n\n \"\"\"\n for key in self.buffer:\n if self.buffer[key]:\n self._write_to_disk(key)\n self.h5file.close()\n\n def get_property(self, name: str):\n \"\"\"\n Get the property from the HDF5 file.\n\n Parameters\n ----------\n name : str\n Name of the property to get.\n\n Returns\n -------\n np.ndarray\n The property.\n\n \"\"\"\n if name not in self.h5file:\n log.debug(f\"{name} not in HDF5 file\")\n return None\n else:\n return np.array(self.h5file[name])\n\n def get_mdtraj_trajectory(self):\n import mdtraj as md\n\n return md.Trajectory(\n xyz=self.get_property(\"traj\"),\n topology=md.Topology.from_openmm(self.topology),\n unitcell_lengths=self.get_property(\"box_vectors\"),\n unitcell_angles=self.get_property(\"box_angles\"),\n )" }, { "identifier": "MetropolisDisplacementMove", "path": "chiron/mcmc.py", "snippet": "class MetropolisDisplacementMove(MetropolizedMove):\n \"\"\"A metropolized move that randomly displace a subset of atoms.\n\n Parameters\n ----------\n displacement_sigma : openmm.unit.Quantity\n The standard deviation of the normal distribution used to propose the\n random displacement (units of length, default is 1.0*nanometer).\n atom_subset : slice or list of int, optional\n If specified, the move is applied only to those atoms specified by these\n indices. If None, the move is applied to all atoms (default is None).\n\n Attributes\n ----------\n n_accepted : int\n The number of proposals accepted.\n n_proposed : int\n The total number of attempted moves.\n displacement_sigma\n atom_subset\n\n See Also\n --------\n MetropolizedMove\n\n \"\"\"\n\n def __init__(\n self,\n seed: int = 1234,\n displacement_sigma=1.0 * unit.nanometer,\n nr_of_moves: int = 100,\n atom_subset: Optional[List[int]] = None,\n simulation_reporter: Optional[SimulationReporter] = None,\n ):\n \"\"\"\n Initialize the MCMC class.\n\n Parameters\n ----------\n seed : int, optional\n The seed for the random number generator. Default is 1234.\n displacement_sigma : float or unit.Quantity, optional\n The standard deviation of the displacement for each move. Default is 1.0 nm.\n nr_of_moves : int, optional\n The number of moves to perform. Default is 100.\n atom_subset : list of int, optional\n A subset of atom indices to consider for the moves. Default is None.\n simulation_reporter : SimulationReporter, optional\n The reporter to write the data to. 
Default is None.\n Returns\n -------\n None\n \"\"\"\n\n super().__init__(nr_of_moves=nr_of_moves, seed=seed)\n self.displacement_sigma = displacement_sigma\n self.atom_subset = atom_subset\n self.simulation_reporter = simulation_reporter\n if self.simulation_reporter is not None:\n log.info(\n f\"Using reporter {self.simulation_reporter} saving to {self.simulation_reporter.filename}\"\n )\n\n def displace_positions(\n self, positions: jnp.array, displacement_sigma=1.0 * unit.nanometer\n ):\n \"\"\"Return the positions after applying a random displacement to them.\n\n Parameters\n ----------\n positions : nx3 jnp.array unit.Quantity\n The positions to displace.\n displacement_sigma : openmm.unit.Quantity\n The standard deviation of the normal distribution used to propose\n the random displacement (units of length, default is 1.0*nanometer).\n\n Returns\n -------\n rotated_positions : nx3 numpy.ndarray openmm.unit.Quantity\n The displaced positions.\n\n \"\"\"\n import jax.random as jrandom\n\n self.key, subkey = jrandom.split(self.key)\n nr_of_atoms = positions.shape[0]\n # log.debug(f\"Number of atoms is {nr_of_atoms}.\")\n unitless_displacement_sigma = displacement_sigma.value_in_unit_system(\n unit.md_unit_system\n )\n # log.debug(f\"Displacement sigma is {unitless_displacement_sigma}.\")\n displacement_vector = (\n jrandom.normal(subkey, shape=(nr_of_atoms, 3)) * 0.1\n ) # NOTE: convert from Angstrom to nm\n scaled_displacement_vector = displacement_vector * unitless_displacement_sigma\n # log.debug(f\"Unscaled Displacement vector is {displacement_vector}.\")\n # log.debug(f\"Scaled Displacement vector is {scaled_displacement_vector}.\")\n updated_position = positions + scaled_displacement_vector\n\n return updated_position\n\n def _propose_positions(self, initial_positions: jnp.array) -> jnp.array:\n \"\"\"Implement MetropolizedMove._propose_positions for apply().\"\"\"\n return self.displace_positions(initial_positions, self.displacement_sigma)\n\n def run(\n self,\n sampler_state: SamplerState,\n thermodynamic_state: ThermodynamicState,\n nbr_list=None,\n progress_bar=True,\n ):\n from tqdm import tqdm\n\n for trials in (\n tqdm(range(self.nr_of_moves)) if progress_bar else range(self.nr_of_moves)\n ):\n self.apply(\n thermodynamic_state, sampler_state, self.simulation_reporter, nbr_list\n )\n if trials % 100 == 0:\n log.debug(f\"Acceptance rate: {self.n_accepted / self.n_proposed}\")\n if self.simulation_reporter is not None:\n self.simulation_reporter.report(\n {\n \"Acceptance rate\": self.n_accepted / self.n_proposed,\n \"step\": self.n_proposed,\n }\n )\n\n log.info(f\"Acceptance rate: {self.n_accepted / self.n_proposed}\")" } ]
from openmmtools.testsystems import LennardJonesFluid
from chiron.potential import LJPotential
from openmm import unit
from chiron.states import SamplerState, ThermodynamicState
from chiron.neighbors import NeighborListNsqrd, OrthogonalPeriodicSpace
from chiron.neighbors import PairList
from chiron.reporters import SimulationReporter
from chiron.mcmc import MetropolisDisplacementMove
import os
13250
# Use the LennardJonesFluid example from openmmtools to initialize particle positions and topology
# For this example, the topology provides the masses for the particles
# The default LennardJonesFluid example considers the system to be Argon with 39.9 amu
lj_fluid = LennardJonesFluid(reduced_density=0.1, nparticles=1000)

# initialize the LennardJones potential in chiron
#
sigma = 0.34 * unit.nanometer
epsilon = 0.238 * unit.kilocalories_per_mole
cutoff = 3.0 * sigma

lj_potential = LJPotential(
    lj_fluid.topology, sigma=sigma, epsilon=epsilon, cutoff=cutoff
)

# define the sampler state
# Use the LennardJonesFluid example from openmmtools to initialize particle positions and topology
# For this example, the topology provides the masses for the particles
# The default LennardJonesFluid example considers the system to be Argon with 39.9 amu
lj_fluid = LennardJonesFluid(reduced_density=0.1, nparticles=1000)

# initialize the LennardJones potential in chiron
#
sigma = 0.34 * unit.nanometer
epsilon = 0.238 * unit.kilocalories_per_mole
cutoff = 3.0 * sigma

lj_potential = LJPotential(
    lj_fluid.topology, sigma=sigma, epsilon=epsilon, cutoff=cutoff
)

# define the sampler state
sampler_state = SamplerState(
1
2023-11-07 18:17:43+00:00
16k
WolfgangFahl/dcm
dcm/dcm_webserver.py
[ { "identifier": "Assessment", "path": "dcm/dcm_assessment.py", "snippet": "class Assessment:\n \"\"\"\n Assessment for CompetenceTree\n \"\"\"\n\n def __init__(\n self,\n webserver: NiceGuiWebserver,\n dcm: DynamicCompetenceMap,\n learner: Learner,\n debug: bool = False,\n ):\n \"\"\"\n initialize the assessment\n\n Args:\n webserver(NiceguiWebServer): the webserver context\n dcm(DynamicCompetenceMap): the competence map\n learner(Learner): the learner to get the self assessment for\n debug(bool): if True show debugging information\n \"\"\"\n self.webserver = webserver\n self.debug = debug\n self.reset(dcm=dcm, learner=learner)\n self.setup_ui()\n\n def reset(\n self,\n dcm: DynamicCompetenceMap,\n learner: Learner,\n ):\n \"\"\"\n (re)set the assessment\n\n Args:\n webserver(NiceguiWebServer): the webserver context\n dcm(DynamicCompetenceMap): the competence map\n learner(Learner): the learner to get the self assessment for\n \"\"\"\n self.dcm = dcm\n self.competence_tree = dcm.competence_tree\n self.learner = learner\n self.achievement_index = 0\n # do we need setup the achievements?\n if self.learner.achievements is None:\n self.learner.achievements = []\n self.setup_achievements()\n self.total = len(self.learner.achievements)\n\n def clear(self):\n \"\"\"\n clear the ui\n \"\"\"\n self.container.clear()\n\n @property\n def current_achievement(self) -> Achievement:\n if self.achievement_index < 0 or self.achievement_index > len(\n self.learner.achievements\n ):\n raise ValueError(f\"invalid achievement index {self.achievement_index}\")\n achievement = self.learner.achievements[self.achievement_index]\n return achievement\n\n def setup_achievements(self):\n \"\"\"\n Setup achievements based on the competence tree.\n\n This method iterates over the competence aspects and their facets,\n constructs a path for each facet, and creates an Achievement instance\n based on the path. 
These achievements are then added to the learner's\n achievements list.\n \"\"\"\n for aspect in self.competence_tree.aspects:\n for area in aspect.areas:\n area_path: str = f\"{self.competence_tree.id}/{aspect.id}\"\n self.add_achievement(area_path)\n for facet in area.facets:\n # Construct the path for the facet\n facet_path=f\"{area_path}/{facet.id}\"\n self.add_achievement(facet_path)\n \n def add_achievement(self,path):\n # Create a new Achievement instance with the constructed path\n new_achievement = Achievement(\n path=path,\n )\n self.learner.add_achievement(new_achievement)\n\n def get_index_str(self) -> str:\n index_str = f\"{self.achievement_index+1:2}/{self.total:2}\"\n return index_str\n\n def setup_ui(self):\n \"\"\"\n display my competence Tree elements\n \"\"\"\n with ui.grid(columns=1).classes(\"w-full\") as self.container:\n self.progress_bar = NiceguiProgressbar(\n total=self.total, desc=\"self assessment\", unit=\"facets\"\n )\n self.progress_bar.reset()\n with ui.row():\n ui.button(\"\", icon=\"arrow_back\", on_click=lambda _args: self.step(-1))\n ui.button(\"\", icon=\"arrow_forward\", on_click=lambda _args: self.step(1))\n with ui.row():\n with ui.card() as self.achievement_view:\n self.index_view = ui.label(self.get_index_str())\n self.link_view = ui.html()\n self.markdown_view = ui.markdown()\n self.button_row = ButtonRow(\n self, self.competence_tree, self.current_achievement\n )\n\n def show_progress(self):\n \"\"\"\n Update the progress bar based on the\n number of achievements with a non-None level value.\n \"\"\"\n count = sum(\n 1\n for achievement in self.learner.achievements\n if achievement.level is not None\n )\n self.progress_bar.total = self.total\n self.progress_bar.update_value(count)\n\n async def step(self, step: int = 0):\n self.update_achievement_view(step)\n\n def update_achievement_view(self, step: int = 0):\n \"\"\"\n display the active achievement as the step indicates\n \"\"\"\n self.show_progress()\n self.webserver.render_dcm(self.dcm, self.learner, clear_assessment=False)\n if self.achievement_index + step < 0:\n ui.notify(\"first achievement reached!\")\n step = 0\n if self.achievement_index + step < len(self.learner.achievements):\n self.achievement_index += step\n self.index_view.text = self.get_index_str()\n achievement = self.current_achievement\n self.button_row.achievement = achievement\n self.button_row.set_button_states(achievement)\n competence_element = self.competence_tree.lookup_by_path(achievement.path)\n if not competence_element:\n ui.notify(\"invalid path: {achievement.path}\")\n self.markdown_view.content = f\"⚠️ {achievement.path}\"\n else:\n if hasattr(competence_element, \"path\"):\n if competence_element.url:\n link = Link.create(\n competence_element.url, competence_element.path\n )\n else:\n link = competence_element.path\n else:\n link = \"⚠️ - competence element path missing\"\n self.link_view.content = link\n description = competence_element.description or \"\"\n if isinstance(competence_element, CompetenceArea):\n aspect = competence_element.aspect\n description = f\"### {aspect.name}\\n\\n**{competence_element.name}**:\\n\\n{description}\"\n if isinstance(competence_element, CompetenceFacet):\n area = competence_element.area\n description = f\"### {area.name}\\n\\n**{competence_element.name}**:\\n\\n{description}\"\n self.markdown_view.content = description\n else:\n ui.notify(\"Done!\")" }, { "identifier": "DcmChart", "path": "dcm/dcm_chart.py", "snippet": "class DcmChart:\n \"\"\"\n a Dynamic competence map 
chart\n \"\"\"\n\n def __init__(self, dcm: DynamicCompetenceMap):\n \"\"\"\n Constructor\n \"\"\"\n self.dcm = dcm\n\n def generate_svg(\n self,\n filename: Optional[str] = None,\n learner: Optional[Learner] = None,\n config: Optional[SVGConfig] = None,\n ) -> str:\n \"\"\"\n Generate the SVG markup and optionally save it to a file. If a filename is given, the method\n will also save the SVG to that file. The SVG is generated based on internal state not shown here.\n\n Args:\n filename (str, optional): The path to the file where the SVG should be saved. Defaults to None.\n learner(Learner): the learner to show the achievements for\n config (SVGConfig, optional): The configuration for the SVG canvas and legend. Defaults to default values.\n\n Returns:\n str: The SVG markup.\n \"\"\"\n if config is None:\n config = SVGConfig() # Use default configuration if none provided\n svg_markup = self.generate_svg_markup(\n self.dcm.competence_tree, learner=learner, config=config\n )\n if filename:\n self.save_svg_to_file(svg_markup, filename)\n return svg_markup\n\n def generate_donut_segment_for_element(\n self,\n svg: SVG,\n element: CompetenceElement,\n learner: Learner,\n segment: DonutSegment,\n ):\n \"\"\"\n generate a donut segment for a given element of\n the CompetenceTree\n \"\"\"\n # Add the element segment as a donut segment\n element_url = (\n element.url\n if element.url\n else f\"{self.lookup_url}/description/{element.path}\"\n if self.lookup_url is not None\n else None\n )\n show_as_popup = element.url is None\n element_config = element.to_svg_node_config(\n url=element_url,\n show_as_popup=show_as_popup,\n x=self.cx,\n y=self.cy,\n )\n # check learner achievements\n if learner:\n achievement = learner.achievements_by_path.get(element.path, None)\n if achievement and achievement.level:\n element_config.element_class = \"selected\"\n svg.add_donut_segment(config=element_config, segment=segment)\n\n def generate_pie_elements(\n self,\n level: int,\n svg: SVG,\n parent_element: CompetenceElement,\n learner: Learner,\n segment: DonutSegment,\n ):\n \"\"\"\n generate the pie elements (donut segments) for the subelements\n of the given parent_element at the given level\n e.g. aspects, areas or facets - taking the learner\n achievements into account if a corresponding achievement\n is found. 
The segment limits the area in which the generation may operate\n \"\"\"\n sub_element_name = self.levels[level]\n # get the elements to be displayed\n elements = getattr(parent_element, sub_element_name)\n total = len(elements)\n # are there any elements to be shown?\n if total > 0:\n angle_per_element = (segment.end_angle - segment.start_angle) / total\n start_angle = segment.start_angle\n for element in elements:\n end_angle = start_angle + angle_per_element\n sub_segment = DonutSegment(\n segment.outer_radius,\n segment.outer_radius + self.tree_radius*2,\n start_angle,\n end_angle,\n )\n self.generate_donut_segment_for_element(\n svg, element, learner, segment=sub_segment\n )\n start_angle = end_angle\n if level + 1 < len(self.levels):\n self.generate_pie_elements(\n level=level + 1,\n svg=svg,\n parent_element=element,\n learner=learner,\n segment=sub_segment,\n )\n\n def generate_svg_markup(\n self,\n competence_tree: CompetenceTree = None,\n learner: Learner = None,\n config: SVGConfig = None,\n with_java_script: bool = True,\n lookup_url: str = \"\",\n ) -> str:\n \"\"\"\n generate the SVG markup for the given CompetenceTree and learner\n\n Args:\n\n \"\"\"\n if competence_tree is None:\n competence_tree = self.dcm.competence_tree\n\n svg = SVG(config)\n self.svg = svg\n config = svg.config\n # center of circle\n self.cx = config.width // 2\n self.cy = (config.total_height - config.legend_height) // 2\n self.levels = [\"aspects\", \"areas\", \"facets\"]\n self.tree_radius = config.width / 2 / 8\n\n self.lookup_url = (\n competence_tree.lookup_url if competence_tree.lookup_url else lookup_url\n )\n\n circle_config = competence_tree.to_svg_node_config(\n x=self.cx, \n y=self.cy, \n width=self.tree_radius\n )\n svg.add_circle(config=circle_config)\n\n segment = DonutSegment(\n inner_radius=0, \n outer_radius=self.tree_radius\n )\n self.generate_pie_elements(\n level=0,\n svg=svg,\n parent_element=competence_tree,\n learner=learner,\n segment=segment,\n )\n if config.legend_height > 0:\n competence_tree.add_legend(svg)\n\n return svg.get_svg_markup(with_java_script=with_java_script)\n\n def save_svg_to_file(self, svg_markup: str, filename: str):\n \"\"\"\n Save the SVG content to a file\n \"\"\"\n with open(filename, \"w\") as file:\n file.write(svg_markup)" }, { "identifier": "CompetenceTree", "path": "dcm/dcm_core.py", "snippet": "class CompetenceTree(CompetenceElement, YamlAble[\"CompetenceTree\"]):\n \"\"\"\n Represents the entire structure of competencies, including various aspects and levels.\n\n Attributes:\n competence_aspects (List[CompetenceAspect]): A list of CompetenceAspect objects.\n competence_levels (List[CompetenceLevel]): A list of CompetenceLevel objects representing the different levels in the competence hierarchy.\n element_names (Dict[str, str]): A dictionary holding the names for tree, aspects, facets, and levels. 
The key is the type (\"tree\", \"aspect\", \"facet\", \"level\").\n \"\"\"\n\n lookup_url: Optional[str] = None\n aspects: List[CompetenceAspect] = field(default_factory=list)\n levels: List[CompetenceLevel] = field(default_factory=list)\n element_names: Dict[str, str] = field(default_factory=dict)\n\n def __post_init__(self):\n \"\"\"\n initalize the path variables of my hierarchy\n \"\"\"\n super().__post_init__()\n self.path = self.id\n # Loop through each competence aspect and set their paths and parent references\n for aspect in self.aspects:\n aspect.competence_tree = self\n aspect.path = f\"{self.id}/{aspect.id}\"\n for area in aspect.areas:\n area.competence_tree = self\n area.aspect = aspect\n area.path = f\"{self.id}/{aspect.id}/{area.id}\"\n for facet in area.facets:\n facet.competence_tree = self\n facet.area = area\n facet.path = f\"{self.id}/{aspect.id}/{area.id}/{facet.id}\"\n\n @classmethod\n def required_keys(cls) -> Tuple:\n keys = {\"name\", \"id\", \"url\", \"description\", \"element_names\"}\n return keys\n\n def lookup_by_path(\n self, path: str, lenient: bool = True\n ) -> Optional[CompetenceElement]:\n \"\"\"\n Look up and return a competence element (tree,aspect of facet)\n based on the given path.\n\n The path is expected to be in the format \"tree_id/aspect_id/facet_id\".\n This method parses the path and retrieves the corresponding competence aspect or facet.\n\n Args:\n path (str): The path in the format \"tree_id/aspect_id/facet_id\".\n\n lenient(bool): if not lenient raise Exceptions for invalid paths and ids\n Returns:\n Optional[CompetenceElement]: The competence aspect or facet corresponding to the given path.\n \"\"\"\n\n def handle_error(msg):\n if not lenient:\n raise ValueError(msg)\n\n parts = path.split(\"/\")\n if len(parts) < 1:\n return None\n\n tree_id = parts[0]\n if tree_id != self.id:\n handle_error(f\"invalid tree_id for lookup {tree_id}\")\n return None\n if len(parts) == 1:\n return self\n if len(parts) > 1:\n aspect_id = parts[1]\n # Retrieve the aspect\n aspect = next((aspect for aspect in self.aspects if aspect.id==aspect_id), None)\n if aspect:\n if len(parts) == 2:\n return aspect\n if len(parts) > 2:\n area_id = parts[2]\n area = next((area for area in aspect.areas if area.id == area_id), None)\n if area:\n if len(parts) == 3:\n return area\n if len(parts) > 3:\n facet_id = parts[3]\n facet = next(\n (facet for facet in area.facets if facet.id == facet_id), None\n )\n if facet:\n return facet\n handle_error(f\"invalid path for lookup {path}\")\n return None\n\n def to_pretty_json(self):\n \"\"\"\n Converts the CompetenceTree object to a pretty JSON string, handling null values.\n \"\"\"\n json_str = self.to_json()\n json_dict = json.loads(json_str)\n\n def remove_none_values(data):\n \"\"\"\n Recursively removes keys with None values from a dictionary, list, or nested structure.\n \"\"\"\n if isinstance(data, dict):\n return {\n k: remove_none_values(v) for k, v in data.items() if v is not None\n }\n elif isinstance(data, list):\n return [remove_none_values(item) for item in data]\n return data\n\n none_free_dict = remove_none_values(json_dict)\n null_free_json_str = json.dumps(none_free_dict, indent=2)\n return null_free_json_str\n\n def add_legend(self, svg: SVG) -> None:\n \"\"\"\n Add a legend to the SVG explaining the color codes for levels and aspects.\n Args:\n svg (SVG): The SVG object to which the legend will be added.\n \"\"\"\n # Starting x position for the legends, starting 10 pixels from the left edge\n x_start = 10\n # y 
position for the legends, starting 20 pixels from the bottom edge\n y = svg.config.total_height - svg.config.legend_height + 20\n # Width and height of each legend color box\n box_width, box_height = 30, 20\n # Padding between legend items and between the color box and the text\n padding = 5\n\n # Add the competence level legend\n level_items = [(level.color_code, level.name) for level in self.levels]\n svg.add_legend_column(\n level_items,\n self.element_names.get(\"level\", \"Level\"),\n x_start,\n y,\n box_width,\n box_height,\n )\n\n # Calculate the x position for the aspect legend based on the width of the level legend\n x_aspect_start = (\n x_start\n + box_width\n + padding\n + max(svg.get_text_width(level.name) for level in self.levels)\n + padding\n )\n\n # Add the competence aspect legend\n aspect_items = [(aspect.color_code, aspect.name) for aspect in self.aspects]\n svg.add_legend_column(\n aspect_items,\n self.element_names.get(\"aspect\", \"Aspect\"),\n x_aspect_start,\n y,\n box_width,\n box_height,\n )" }, { "identifier": "DynamicCompetenceMap", "path": "dcm/dcm_core.py", "snippet": "class DynamicCompetenceMap:\n \"\"\"\n a visualization of a competence map\n \"\"\"\n\n def __init__(self, competence_tree: CompetenceTree):\n \"\"\"\n constructor\n \"\"\"\n self.competence_tree = competence_tree\n self.svg = None\n\n @property\n def main_id(self):\n main_id = self.competence_tree.id\n return main_id\n\n @classmethod\n def examples_path(cls) -> str:\n # the root directory (default: examples)\n path = os.path.join(os.path.dirname(__file__), \"../dcm_examples\")\n path = os.path.abspath(path)\n return path\n\n @classmethod\n def get_example_dcm_definitions(\n cls,\n markup: str = \"json\",\n required_keys: Optional[Tuple] = None,\n as_text: bool = True,\n ) -> dict:\n \"\"\"\n Retrieve example Dynamic Competence Map (DCM) definitions from files in the specified markup format (either JSON or YAML).\n\n Args:\n markup (str): The markup format of the input files. Defaults to 'json'. Supported values are 'json' and 'yaml'.\n required_keys (Optional[Tuple]): A tuple of keys required to validate the data. If not provided, all keys will be considered valid.\n as_text (bool): If True, returns the file content as text; if False, returns parsed data. 
Defaults to True.\n\n Returns:\n dict: A dictionary where each key is the prefix of the file name and the value is the file content as text or parsed data, depending on the value of 'as_text'.\n\n Raises:\n Exception: If there's an error in reading or parsing the file, or if the file does not meet the required validation criteria.\n \"\"\"\n example_dcm_defs = {}\n file_ext = f\".{markup}\"\n examples_path = cls.examples_path()\n for dirpath, _dirnames, filenames in os.walk(examples_path):\n for filename in filenames:\n if filename.endswith(file_ext):\n filepath = os.path.join(dirpath, filename)\n with open(filepath, \"r\") as definition_file:\n file_prefix = filename.replace(file_ext, \"\")\n definition_text = definition_file.read()\n try:\n definition_data = cls.parse_markup(definition_text, markup)\n if cls.is_valid_definition(definition_data, required_keys):\n if as_text:\n example_dcm_defs[file_prefix] = definition_text\n else:\n example_dcm_defs[file_prefix] = definition_data\n except Exception as ex:\n cls.handle_markup_issue(\n filename, definition_text, ex, markup\n )\n return example_dcm_defs\n\n @classmethod\n def parse_markup(cls, text: str, markup: str) -> Union[dict, list]:\n \"\"\"\n Parse the given text as JSON or YAML based on the specified markup type.\n\n Args:\n text (str): The string content to be parsed.\n markup (str): The type of markup to use for parsing. Supported values are 'json' and 'yaml'.\n\n Returns:\n Union[dict, list]: The parsed data, which can be either a dictionary or a list, depending on the content.\n\n Raises:\n ValueError: If an unsupported markup format is specified.\n \"\"\"\n if markup == \"json\":\n data=json.loads(text)\n return data\n elif markup == \"yaml\":\n data=yaml.safe_load(text)\n return data\n else:\n raise ValueError(f\"Unsupported markup format: {markup}\")\n\n @classmethod\n def handle_markup_issue(cls, name: str, definition_string: str, ex, markup: str):\n if isinstance(ex, JSONDecodeError):\n lines = definition_string.splitlines() # Split the string into lines\n err_line = lines[ex.lineno - 1] # JSONDecodeError gives 1-based lineno\n pointer = (\n \" \" * (ex.colno - 1) + \"^\"\n ) # Create a pointer string to indicate the error position\n error_message = (\n f\"{name}:JSON parsing error on line {ex.lineno} column {ex.colno}:\\n\"\n f\"{err_line}\\n\"\n f\"{pointer}\\n\"\n f\"{ex.msg}\"\n )\n raise ValueError(error_message) # Raise a new exception with this message\n else:\n error_message = f\"error in {name}: {str(ex)}\"\n raise ValueError(error_message)\n\n @classmethod\n def is_valid_definition(cls, definition_data, required_keys: Tuple):\n return all(key in definition_data for key in required_keys)\n\n @classmethod\n def get_examples(cls, content_class=CompetenceTree, markup: str = \"json\") -> dict:\n examples = {}\n for name, definition_string in cls.get_example_dcm_definitions(\n required_keys=content_class.required_keys(), markup=markup\n ).items():\n example = cls.from_definition_string(\n name, definition_string, content_class, markup=markup\n )\n # check the type of the example\n example_id = example.main_id\n examples[example_id] = example\n return examples\n\n @classmethod\n def from_definition_string(\n cls, name: str, definition_string: str, content_class, markup: str = \"json\"\n ) -> Any:\n \"\"\"\n Load a DynamicCompetenceMap or Learner instance from a definition string (either JSON or YAML).\n\n Args:\n name (str): A name identifier for the data source.\n definition_string (str): The string content of the 
definition.\n content_class (dataclass_json): The class which will be instantiated with the parsed data.\n markup (str): The markup format of the data. Defaults to 'json'. Supported values are 'json' and 'yaml'.\n\n Returns:\n DynamicCompetenceMap: An instance of DynamicCompetenceMap loaded with the parsed data.\n\n Raises:\n ValueError: If there's an error in parsing the data.\n \"\"\"\n try:\n data = cls.parse_markup(definition_string, markup)\n content = content_class.from_dict(data)\n if isinstance(content, CompetenceTree):\n return DynamicCompetenceMap(content)\n else:\n return content\n except Exception as ex:\n cls.handle_markup_issue(name, definition_string, ex, markup)" }, { "identifier": "Learner", "path": "dcm/dcm_core.py", "snippet": "class Learner:\n \"\"\"\n A learner with achievements.\n Attributes:\n learner_id (str): Identifier for the learner.\n achievements (Dict[str, List[Achievement]]):\n A dictionary where each key is a competence element identifier\n and the value is a list of Achievement instances for that tree.\n \"\"\"\n\n learner_id: str\n achievements: Optional[List[Achievement]] = field(default=None)\n\n def __post_init__(self):\n self.achievements_by_path = {}\n if self.achievements:\n for achievement in self.achievements:\n self.achievements_by_path[achievement.path] = achievement\n\n @classmethod\n def required_keys(cls):\n keys = {\"achievements\"}\n return keys\n\n @property\n def main_id(self):\n main_id = self.learner_id\n return main_id\n\n def add_achievement(self, new_achievement):\n self.achievements.append(new_achievement)\n self.achievements_by_path[new_achievement.path] = new_achievement\n\n def get_competence_tree_ids(self) -> List[str]:\n \"\"\"\n Get all unique competence tree IDs of my achievements.\n\n Returns:\n List[str]: A list of unique competence tree IDs.\n \"\"\"\n # Assuming that the learner's achievements are stored in a list called self.achievements\n # You can modify this part according to your actual data structure.\n\n # Create a set to store unique competence tree IDs\n unique_tree_ids = set()\n\n # Iterate through the learner's achievements\n for achievement in self.achievements:\n # Assuming each achievement has a tree_id attribute\n tree_id = achievement.tree_id\n\n # Add the tree_id to the set\n unique_tree_ids.add(tree_id)\n\n # Convert the set to a list and return\n return list(unique_tree_ids)" }, { "identifier": "SVG", "path": "dcm/svg.py", "snippet": "class SVG:\n \"\"\"\n Class for creating SVG drawings.\n\n Attributes:\n config (SVGConfig): Configuration for the SVG drawing.\n \"\"\"\n\n def __init__(self, config: SVGConfig = None):\n \"\"\"\n Initialize SVG object with given configuration.\n\n Args:\n config (SVGConfig): Configuration for SVG generation.\n \"\"\"\n self.config = config if config else SVGConfig()\n self.width = self.config.width\n self.height = self.config.height\n self.elements = []\n self.indent = self.config.indent\n\n def get_svg_style(self) -> str:\n \"\"\"\n Define styles for SVG elements.\n\n Returns:\n str: String containing style definitions for SVG.\n \"\"\"\n return (\n f\"{self.indent}<style>\\n\"\n f\"{self.indent * 2}.hoverable {{ cursor: pointer; fill-opacity: 1; stroke: black; stroke-width: 0.5; }}\\n\"\n f\"{self.indent * 2}.hoverable:hover {{ fill-opacity: 0.7; }}\\n\"\n f\"{self.indent * 2}.selected {{ fill-opacity: 0.5; stroke: blue; stroke-width: 1.5;}}\\n\"\n f\"{self.indent * 2}.popup {{\\n\"\n f\"{self.indent * 3}border: 2px solid black;\\n\"\n f\"{self.indent * 
3}border-radius: 15px;\\n\"\n f\"{self.indent * 3}overflow: auto;\\n\" # changed to 'auto' to allow scrolling only if needed\n f\"{self.indent * 3}background: white;\\n\"\n f\"{self.indent * 3}box-sizing: border-box;\\n\" # ensures padding and border are included\n f\"{self.indent * 3}padding: 10px;\\n\" # optional padding inside the popup\n f\"{self.indent * 3}height: 100%;\\n\" # adjusts height relative to foreignObject\n f\"{self.indent * 3}width: 100%;\\n\" # adjusts width relative to foreignObject\n f\"{self.indent * 2}}}\\n\"\n f\"{self.indent * 2}.close-btn {{\\n\" # style for the close button\n f\"{self.indent * 3}cursor: pointer;\\n\"\n f\"{self.indent * 3}position: absolute;\\n\"\n f\"{self.indent * 3}top: 0;\\n\"\n f\"{self.indent * 3}right: 0;\\n\"\n f\"{self.indent * 3}padding: 5px;\\n\"\n f\"{self.indent * 3}font-size: 20px;\\n\"\n f\"{self.indent * 3}user-select: none;\\n\" # prevents text selection on click\n f\"{self.indent * 2}}}\\n\"\n f\"{self.indent}</style>\\n\"\n )\n\n def get_text_width(self, text: str) -> int:\n \"\"\"\n Estimate the width of a text string in the SVG based on the font size and font name.\n\n Args:\n text (str): The text content.\n\n Returns:\n int: The estimated width of the text in pixels.\n \"\"\"\n average_char_width_factor = 0.6\n average_char_width = average_char_width_factor * self.config.font_size\n return int(average_char_width * len(text))\n\n def add_element(self, element: str, level: int = 1, comment: str = None):\n \"\"\"\n Add an SVG element to the elements list with proper indentation.\n\n Args:\n element (str): SVG element to be added.\n level (int): Indentation level for the element.\n comment(str): optional comment to add\n \"\"\"\n base_indent = f\"{self.indent * level}\"\n if comment:\n indented_comment = f\"{base_indent}<!-- {comment} -->\\n\"\n self.elements.append(indented_comment)\n indented_element = f\"{base_indent}{element}\\n\"\n self.elements.append(indented_element)\n\n def add_circle(self, config: SVGNodeConfig):\n \"\"\"\n Add a circle element to the SVG, optionally making it clickable and with a hover effect.\n\n Args:\n config (SVGNodeConfig): Configuration for the circle element.\n \"\"\"\n color = config.fill if config.fill else self.config.default_color\n circle_element = f'<circle cx=\"{config.x}\" cy=\"{config.y}\" r=\"{config.width}\" fill=\"{color}\" class=\"{config.element_class}\" />'\n\n # If URL is provided, wrap the circle in an anchor tag to make it clickable\n if config.url:\n circle_indent = self.indent * (config.indent_level + 1)\n circle_element = f\"\"\"<a xlink:href=\"{config.url}\" target=\"_blank\">\n{circle_indent}{circle_element}\n</a>\"\"\"\n\n # Use add_group to add the circle element with proper indentation\n self.add_group(\n circle_element,\n group_id=config.id,\n group_class=config.element_class,\n level=config.indent_level,\n comment=config.comment,\n )\n\n def add_rectangle(\n self,\n x: int,\n y: int,\n width: int,\n height: int,\n fill: str = None,\n indent_level: int = 1,\n ):\n \"\"\"\n Add a rectangle element to the SVG.\n\n Args:\n x (int): X-coordinate of the rectangle's top-left corner.\n y (int): Y-coordinate of the rectangle's top-left corner.\n width (int): Width of the rectangle.\n height (int): Height of the rectangle.\n fill (str, optional): Fill color of the rectangle. 
Defaults to the default color.\n indent_level (int): Indentation level for the rectangle.\n \"\"\"\n color = fill if fill else self.config.default_color\n rect = f'{self.indent * 3}<rect x=\"{x}\" y=\"{y}\" width=\"{width}\" height=\"{height}\" fill=\"{color}\" />\\n'\n self.add_element(rect)\n\n def add_legend_column(\n self,\n items: List[Tuple[str, str]],\n title: str,\n x: int,\n y: int,\n width: int,\n height: int,\n ) -> None:\n \"\"\"\n Add a legend column to the SVG.\n\n Args:\n items (List[Tuple[str, str]]): List of tuples with color code and label.\n title (str): Title of the legend.\n x (int): X position of the legend.\n y (int): Y position of the legend.\n width (int): Width of the color box in the legend.\n height (int): Height of each legend item.\n \"\"\"\n self.add_text(x, y - height, title, font_weight=\"bold\")\n for index, (color, label) in enumerate(items):\n self.add_rectangle(x, y + index * (height + 5), width, height, color)\n self.add_text(x + width + 10, y + index * (height + 5) + height / 2, label)\n\n def add_text(\n self,\n x: int,\n y: int,\n text: str,\n fill: str = \"black\",\n font_weight: str = \"normal\",\n text_anchor: str = \"start\",\n ) -> None:\n \"\"\"\n Add text to the SVG.\n\n Args:\n x (int): X position of the text.\n y (int): Y position of the text.\n text (str): Text content.\n fill (str, optional): Fill color of the text. Defaults to \"black\".\n font_weight (str, optional): Font weight (normal, bold, etc.). Defaults to \"normal\".\n text_anchor (str, optional): Text alignment (start, middle, end). Defaults to \"start\".\n \"\"\"\n escaped_text = html.escape(text)\n text_element = (\n f'<text x=\"{x}\" y=\"{y}\" fill=\"{fill}\" '\n f'font-family=\"{self.config.font}\" '\n f'font-size=\"{self.config.font_size}\" '\n f'font-weight=\"{font_weight}\" '\n f'text-anchor=\"{text_anchor}\">'\n f\"{escaped_text}</text>\\n\"\n )\n self.add_element(text_element)\n\n def add_group(\n self,\n content: str,\n group_id: str = None,\n group_class: str = None,\n level: int = 1,\n comment: str = None,\n ):\n \"\"\"\n Add a group of elements to the SVG.\n\n Args:\n content (str): SVG content to be grouped.\n group_id (str, optional): ID for the group.\n group_class (str, optional): Class for the group.\n level (int): Indentation level for the group.\n \"\"\"\n group_attrs = []\n if group_id:\n group_attrs.append(f'id=\"{group_id}\"')\n if group_class:\n group_attrs.append(f'class=\"{group_class}\"')\n attrs_str = \" \".join(group_attrs)\n indented_content = \"\\n\".join(\n f\"{self.indent * (level + 1)}{line}\" for line in content.strip().split(\"\\n\")\n )\n group_str = f\"{self.indent * level}<g {attrs_str}>\\n{indented_content}\\n{self.indent * level}</g>\\n\"\n self.add_element(group_str, level=level, comment=comment)\n\n def add_pie_segment(\n self,\n cx: int,\n cy: int,\n radius: int,\n start_angle_deg: float,\n end_angle_deg: float,\n color: str,\n segment_name: str,\n segment_id: str = None,\n segment_class: str = None,\n segment_url: str = None,\n ) -> None:\n \"\"\"\n Add a pie segment to the SVG.\n\n Args:\n cx (int): X-coordinate of the center of the pie.\n cy (int): Y-coordinate of the center of the pie.\n radius (int): Radius of the pie.\n start_angle_deg (float): Start angle of the segment in degrees.\n end_angle_deg (float): End angle of the segment in degrees.\n color (str): Fill color of the segment.\n segment_name (str): Name of the segment, used for the tooltip.\n segment_id (str, optional): ID for the segment group. 
Defaults to None.\n segment_class (str, optional): Class for the segment group. Defaults to None.\n segment_url (str, optional): URL linked to the segment. Defaults to None.\n\n Returns:\n None\n \"\"\"\n if color is None:\n color = self.config.default_color\n # Convert angles from degrees to radians for calculations\n start_angle_rad = radians(start_angle_deg)\n end_angle_rad = radians(end_angle_deg)\n\n # Calculate the start and end points\n start_x = cx + radius * cos(start_angle_rad)\n start_y = cy + radius * sin(start_angle_rad)\n end_x = cx + radius * cos(end_angle_rad)\n end_y = cy + radius * sin(end_angle_rad)\n\n # Determine if the arc should be drawn as a large-arc (values >= 180 degrees)\n large_arc_flag = \"1\" if end_angle_deg - start_angle_deg >= 180 else \"0\"\n\n # Create the path for the pie segment without indentation\n path_str = (\n f\"M {cx} {cy} \"\n f\"L {start_x} {start_y} \"\n f\"A {radius} {radius} 0 {large_arc_flag} 1 {end_x} {end_y} \"\n \"Z\"\n )\n\n # Assemble the path and title elements\n path_element = f'<path d=\"{path_str}\" fill=\"{color}\" />\\n'\n escaped_title = html.escape(segment_name) # Escape special characters\n\n title_element = f\"<title>{escaped_title}</title>\"\n\n # Combine path and title into one string without adding indentation here\n group_content = f\"{path_element}{title_element}\"\n\n # If an URL is provided, wrap the content within an anchor\n if segment_url:\n group_content = (\n f'<a xlink:href=\"{segment_url}\" target=\"_blank\">\\n{group_content}</a>\\n'\n )\n\n # Use add_group to add the pie segment with proper indentation\n self.add_group(\n group_content, group_id=segment_id, group_class=segment_class, level=2\n )\n\n def add_donut_segment(\n self,\n config: SVGNodeConfig,\n segment: DonutSegment,\n ) -> None:\n \"\"\"\n Add a donut segment to the SVG.\n\n Args:\n config (SVGNodeConfig): Configuration for the donut segment.\n start_angle_deg (float): Start angle of the segment in degrees.\n end_angle_deg (float): End angle of the segment in degrees.\n \"\"\"\n cx, cy = config.x, config.y\n color = config.fill if config.fill else self.config.default_color\n\n if color is None:\n color = self.config.default_color\n # Convert angles from degrees to radians for calculations\n start_angle_rad = radians(segment.start_angle)\n end_angle_rad = radians(segment.end_angle)\n\n # Calculate the start and end points for the outer radius\n start_x_outer = cx + segment.outer_radius * cos(start_angle_rad)\n start_y_outer = cy + segment.outer_radius * sin(start_angle_rad)\n end_x_outer = cx + segment.outer_radius * cos(end_angle_rad)\n end_y_outer = cy + segment.outer_radius * sin(end_angle_rad)\n\n # Calculate the start and end points for the inner radius\n start_x_inner = cx + segment.inner_radius * cos(start_angle_rad)\n start_y_inner = cy + segment.inner_radius * sin(start_angle_rad)\n end_x_inner = cx + segment.inner_radius * cos(end_angle_rad)\n end_y_inner = cy + segment.inner_radius * sin(end_angle_rad)\n\n # Determine if the arc should be drawn as a large-arc (values >= 180 degrees)\n large_arc_flag = \"1\" if segment.end_angle - segment.start_angle >= 180 else \"0\"\n\n # Create the path for the pie segment without indentation\n path_str = (\n f\"M {start_x_inner} {start_y_inner} \" # Move to start of inner arc\n f\"L {start_x_outer} {start_y_outer} \" # Line to start of outer arc\n f\"A {segment.outer_radius} {segment.outer_radius} 0 {large_arc_flag} 1 {end_x_outer} {end_y_outer} \" # Outer arc\n f\"L {end_x_inner} {end_y_inner} \" # 
Line to end of inner arc\n f\"A {segment.inner_radius} {segment.inner_radius} 0 {large_arc_flag} 0 {start_x_inner} {start_y_inner} \" # Inner arc (reverse)\n \"Z\"\n )\n\n # Assemble the path and title elements\n path_element = f'<path d=\"{path_str}\" fill=\"{color}\" />\\n'\n escaped_title = html.escape(config.title) # Escape special characters\n\n title_element = f\"<title>{escaped_title}</title>\"\n\n # Combine path and title into one string without adding indentation here\n group_content = f\"{path_element}{title_element}\"\n\n # Check if the segment should be shown as a popup\n if config.show_as_popup:\n # Add JavaScript to handle popup logic\n onclick_action = f\"onclick=\\\"showPopup('{config.url}', evt,this)\\\"\"\n group_content = f\"<g {onclick_action}>{group_content}</g>\"\n elif config.url:\n # Regular link behavior\n group_content = (\n f'<a xlink:href=\"{config.url}\" target=\"_blank\">{group_content}</a>'\n )\n\n # Use add_group to add the pie segment with proper indentation\n self.add_group(\n group_content,\n group_id=config.id,\n group_class=config.element_class,\n level=2,\n comment=config.comment,\n )\n\n def get_java_script(self) -> str:\n \"\"\"\n get the java script code for interactive behavior\n \"\"\"\n popup_script = \"\"\"\n <script>\n function showPopup(url, evt,element) {\n // show a Popup fetching html content from the given url\n // for the given element\n // Handle the selection of the popup element\n selectPopupElement(element);\n var popup = document.getElementById('dcm-svg-popup');\n var iframe = document.getElementById('popup-iframe');\n var svgRect = evt.target.getBoundingClientRect();\n var svg = document.querySelector('svg');\n var svgPoint = svg.createSVGPoint();\n svgPoint.x = evt.clientX - svgRect.left;\n svgPoint.y = evt.clientY - svgRect.top;\n \n // Position the popup near the click event\n popup.setAttribute('x', svgPoint.x);\n popup.setAttribute('y', svgPoint.y);\n // Set the iframe src and make the popup visible\n iframe.setAttribute('src', url);\n popup.setAttribute('visibility', 'visible');\n }\n \n function selectPopupElement(element) {\n var popup = document.getElementById('dcm-svg-popup');\n \n // Deselect the current element if there is one\n if (popup.currentElement) {\n popup.currentElement.classList.remove('selected');\n }\n \n // Select the new element\n if (element) {\n element.classList.add('selected');\n popup.currentElement = element; // Update the reference to the currently selected element\n } else {\n popup.currentElement = null; // Clear the reference if no element is passed\n }\n }\n \n function closePopup() {\n var popup = document.getElementById('dcm-svg-popup');\n popup.setAttribute('visibility', 'hidden');\n // Deselect the element when the popup is closed\n selectPopupElement(null);\n }\n </script>\n \"\"\"\n return popup_script\n\n def get_svg_markup(self, with_java_script: bool = True) -> str:\n \"\"\"\n Generate the complete SVG markup.\n\n Args:\n with_java_script(bool): if True(default) the javascript code is included otherwise\n it's available via the get_java_script function\n\n Returns:\n str: String containing the complete SVG markup.\n \"\"\"\n # Get current date and time\n now = datetime.now()\n formatted_now = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n header = (\n f\"<!-- generated by dcm https://github.com/WolfgangFahl/dcm at {formatted_now} -->\\n\"\n f'<svg xmlns=\"http://www.w3.org/2000/svg\" '\n f'xmlns:xlink=\"http://www.w3.org/1999/xlink\" '\n f'width=\"{self.width}\" 
height=\"{self.config.total_height}\">\\n'\n )\n popup = \"\"\"\n <!-- Add a foreignObject for the popup -->\n<foreignObject id=\"dcm-svg-popup\" class=\"popup\" width=\"500\" height=\"354\" x=\"150\" y=\"260\" visibility=\"hidden\">\n <body xmlns=\"http://www.w3.org/1999/xhtml\">\n <!-- Content of your popup goes here -->\n <div class=\"popup\" style=\"background-color: white; border: 1px solid black; padding: 10px; box-sizing: border-box; width: 500px; height: 354px; position: relative;\">\n <span onclick=\"closePopup()\" class=\"close-btn\">ⓧ</span>\n <iframe id=\"popup-iframe\" width=\"100%\" height=\"100%\" frameborder=\"0\"></iframe>\n </div>\n </body>\n</foreignObject>\n\"\"\"\n\n styles = self.get_svg_style()\n body = \"\".join(self.elements)\n footer = \"</svg>\"\n java_script = self.get_java_script() if with_java_script else \"\"\n svg_markup = f\"{header}{java_script}{styles}{body}{popup}{footer}\"\n return svg_markup\n\n def save(self, filename: str):\n \"\"\"\n Save the SVG markup to a file.\n\n Args:\n filename (str): Filename to save the SVG markup.\n \"\"\"\n with open(filename, \"w\") as file:\n file.write(self.get_svg_markup())" }, { "identifier": "SVGConfig", "path": "dcm/svg.py", "snippet": "class SVGConfig:\n \"\"\"\n Configuration class for SVG generation.\n\n Attributes:\n width (int): Width of the SVG canvas in pixels.\n height (int): Height of the SVG canvas in pixels.\n legend_height (int): Height reserved for the legend in pixels.\n font (str): Font family for text elements.\n font_size (int): Font size in points for text elements.\n indent (str): Indentation string, default is two spaces.\n default_color (str): Default color code for SVG elements.\n \"\"\"\n\n width: int = 600\n height: int = 600\n legend_height: int = 150\n font: str = \"Arial\"\n font_size: int = 12\n indent: str = \" \"\n default_color: str = \"#C0C0C0\"\n\n @property\n def total_height(self) -> int:\n \"\"\"\n Calculate total height of the SVG canvas including the legend.\n\n Returns:\n int: Total height of the SVG canvas.\n \"\"\"\n return self.height + self.legend_height" }, { "identifier": "Version", "path": "dcm/version.py", "snippet": "class Version:\n \"\"\"\n Version handling for nicepdf\n \"\"\"\n\n name = \"dcm\"\n version = dcm.__version__\n date = \"2023-11-06\"\n updated = \"2024-01-15\"\n description = \"python based visualization of dynamic competence maps\"\n\n authors = \"Wolfgang Fahl\"\n\n doc_url = \"https://wiki.bitplan.com/index.php/dcm\"\n chat_url = \"https://github.com/WolfgangFahl/dcm/discussions\"\n cm_url = \"https://github.com/WolfgangFahl/dcm\"\n\n license = f\"\"\"Copyright 2023 contributors. All rights reserved.\n\n Licensed under the Apache License 2.0\n http://www.apache.org/licenses/LICENSE-2.0\n\n Distributed on an \"AS IS\" basis without warranties\n or conditions of any kind, either express or implied.\"\"\"\n\n longDescription = f\"\"\"{name} version {version}\n{description}\n\n Created by {authors} on {date} last updated {updated}\"\"\"" } ]
import os from typing import Optional from urllib.parse import urlparse from fastapi import HTTPException from fastapi.responses import HTMLResponse from ngwidgets.file_selector import FileSelector from ngwidgets.input_webserver import InputWebserver from ngwidgets.webserver import WebserverConfig from nicegui import Client, app, ui from pydantic import BaseModel from dcm.dcm_assessment import Assessment from dcm.dcm_chart import DcmChart from dcm.dcm_core import CompetenceTree, DynamicCompetenceMap, Learner from dcm.svg import SVG, SVGConfig from dcm.version import Version
11,980
""" Created on 2023-11-06 @author: wf """ class SVGRenderRequest(BaseModel): """ A request for rendering an SVG. Attributes: name (str): The name of the render request. definition (str): The string representation of the data to be rendered, in either JSON or YAML format. markup (str): The format of the definition ('json' or 'yaml'). config (SVGConfig): Optional configuration for SVG rendering. Defaults to None, which uses default settings. """ name: str definition: str markup: str
""" Created on 2023-11-06 @author: wf """ class SVGRenderRequest(BaseModel): """ A request for rendering an SVG. Attributes: name (str): The name of the render request. definition (str): The string representation of the data to be rendered, in either JSON or YAML format. markup (str): The format of the definition ('json' or 'yaml'). config (SVGConfig): Optional configuration for SVG rendering. Defaults to None, which uses default settings. """ name: str definition: str markup: str
config: Optional[SVGConfig] = None
6
2023-11-06 09:24:24+00:00
16k
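In the record above, the class fragment stops after markup: str and the single line recorded immediately after it is config: Optional[SVGConfig] = None. A hypothetical sketch of the class with that continuation appended, assuming SVGConfig is a dataclass that pydantic accepts as a field type, as in the quoted dcm code; the instantiation values are made up.

from typing import Optional
from pydantic import BaseModel
from dcm.svg import SVGConfig

class SVGRenderRequest(BaseModel):
    # fields exactly as in the fragment above, plus the continuation line
    name: str
    definition: str
    markup: str
    config: Optional[SVGConfig] = None

# Illustrative request with a made-up YAML payload and no explicit config
request = SVGRenderRequest(name="example", definition="competence_tree: {}", markup="yaml")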
Harvard-Ophthalmology-AI-Lab/FairSeg
SAMed/segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "SAMed/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n def forward(self, batched_input, multimask_output, image_size):\n if isinstance(batched_input, list):\n outputs = self.forward_test(batched_input, multimask_output)\n else:\n outputs = self.forward_train(batched_input, multimask_output, image_size)\n return outputs\n\n def forward_train(self, batched_input, multimask_output, image_size):\n input_images = self.preprocess(batched_input)\n image_embeddings = self.image_encoder(input_images)\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=None, boxes=None, masks=None\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=image_embeddings,\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=(image_size, image_size),\n original_size=(image_size, image_size)\n )\n outputs = {\n 'masks': masks,\n 'iou_predictions': iou_predictions,\n 'low_res_logits': low_res_masks\n }\n return outputs\n\n @torch.no_grad()\n def forward_test(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. 
Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input promts,\n C is determiend by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. 
Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x" }, { "identifier": "SamPredictor", "path": "SAMed/segment_anything/predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. 
Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks = masks[0].detach().cpu().numpy()\n iou_predictions = iou_predictions[0].detach().cpu().numpy()\n low_res_masks = low_res_masks[0].detach().cpu().numpy()\n return masks, iou_predictions, low_res_masks\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. 
These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert self.features is not None, \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n 
elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n 
return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecesary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: 
torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], 
device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
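Among the helpers quoted in the list above, mask_to_rle_pytorch and rle_to_mask act as inverse operations for boolean masks. A small round-trip sketch, assuming the quoted amg module is importable as segment_anything.utils.amg (the exact package path inside this repository may differ).

import numpy as np
import torch
from segment_anything.utils.amg import mask_to_rle_pytorch, rle_to_mask

# One 4x4 boolean mask containing a 2x2 foreground square
masks = torch.zeros((1, 4, 4), dtype=torch.bool)
masks[0, 1:3, 1:3] = True

rles = mask_to_rle_pytorch(masks)   # uncompressed RLE, one dict per mask
restored = rle_to_mask(rles[0])     # back to an HxW boolean numpy array

assert np.array_equal(restored, masks[0].numpy())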
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
11,351
} curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros(len(data["boxes"])), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros(len(data["boxes"])), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crops_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crops_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros(len(data["boxes"])), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros(len(data["boxes"])), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries
keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h])
11
2023-11-03 17:05:40+00:00
16k
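The record above documents the automatic mask generation pipeline: a grid of point prompts is run in batches, candidate masks are filtered by predicted IoU and stability score, deduplicated with NMS within and across crops, and returned as annotation dicts. The sketch below is illustrative and not part of the dataset record; it assumes the standard segment_anything package in which this class ships (including its sam_model_registry entry point, which does not appear in the record itself), and the checkpoint and image paths are placeholders.

import cv2
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator

# Build a ViT-H SAM model; the checkpoint path is a placeholder.
sam = sam_model_registry["vit_h"](checkpoint="checkpoints/sam_vit_h_4b8939.pth")

# Constructor arguments mirror the defaults documented in the record above.
mask_generator = SamAutomaticMaskGenerator(
    sam,
    points_per_side=32,           # 32*32 point prompts over the image
    pred_iou_thresh=0.88,         # drop masks the model itself rates as low quality
    stability_score_thresh=0.95,  # drop masks unstable under cutoff shifts
    min_mask_region_area=100,     # remove tiny regions/holes (requires opencv)
    output_mode="binary_mask",
)

# generate() expects an HWC uint8 RGB image.
image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
masks = mask_generator.generate(image)

# Each annotation carries the keys documented in generate()'s docstring.
for ann in masks[:3]:
    print(ann["bbox"], ann["area"], ann["predicted_iou"], ann["stability_score"])

Setting min_mask_region_area above zero exercises the postprocess_small_regions path shown in the record, which reruns box NMS after filling holes and removing small disconnected components.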
microsoft/PLEX
PLEX/util/misc.py
[ { "identifier": "setup_logging", "path": "PLEX/util/log.py", "snippet": "def setup_logging(args):\n log_dir = Path(args['log_dir']).expanduser()\n if not log_dir.is_dir():\n print(f'Creating log dir {log_dir}')\n log_dir.mkdir(parents=True)\n\n now_str = datetime.now().strftime('%m-%d-%y_%H.%M.%S')\n log_id = args.get('log_id', ''.join(random.choice(string.ascii_lowercase) for _ in range(4)))\n run_log_dir = log_dir/f'{now_str}_{log_id}'\n run_log_dir.mkdir()\n log = Log(run_log_dir)\n log(f'Log path: {log.path}')\n\n config_path = run_log_dir/'config.json'\n log(f'Dumping config to {config_path}')\n config_path.write_text(json.dumps(args))\n\n return log" }, { "identifier": "Timer", "path": "PLEX/util/timer.py", "snippet": "class Timer:\n \"\"\"\n A timer for measuring elapsed time statistics. The intended usage involves a with statement, like so:\n timer = Timer()\n with timer.time('iteration'):\n ... # do some stuff\n timer.averages['iteration']\n \"\"\"\n def __init__(self, log, verbose=True):\n self.log = log\n self.verbose = verbose\n self._averages = {}\n self._counts = {}\n\n def __getitem__(self, item):\n return self._averages[item]\n\n def _context_exit(self, name, dt, verbose):\n if name in self._counts:\n old_n = self._counts[name]\n new_n = old_n + 1\n self._averages[name] = (old_n * self._averages[name] + dt) / new_n\n self._counts[name] = new_n\n else:\n self._averages[name] = dt\n self._counts[name] = 1\n\n verbose = self.verbose if verbose is None else verbose\n assert type(verbose) is bool\n if verbose:\n self.log(f'Completed {name} in {dt} seconds. Average = {self[name]}')\n\n def time(self, name, verbose=None):\n return TimeContext(self, name, verbose)" }, { "identifier": "PLEX", "path": "PLEX/models/trajectory_models/plex.py", "snippet": "class PLEX(TrajectoryModel):\n def __init__(\n self,\n camera_names, obs_dims,\n proprio_dim, act_dim,\n hidden_dim,\n relative_position_encodings,\n future_step=1,\n obs_pred_gpt2_kwargs={},\n inv_d_pred_gpt2_kwargs={},\n **kwargs\n ):\n super().__init__(camera_names, obs_dims, proprio_dim, act_dim, hidden_dim, **kwargs)\n\n # Create separately trainable positional embeddings and LayerNorms for the observational and the inverse dynamics transformer.\n self.relative_position_encodings = relative_position_encodings\n obs_pred_gpt2_kwargs['relative_position_encodings'] = relative_position_encodings\n inv_d_pred_gpt2_kwargs['relative_position_encodings'] = relative_position_encodings\n\n self.obs_tr_history_len = obs_pred_gpt2_kwargs['K']\n self.inv_d_tr_history_len = inv_d_pred_gpt2_kwargs['K']\n\n if not self.relative_position_encodings:\n self.embed_obs_tr_timestep = nn.Embedding(self.obs_tr_history_len, hidden_dim)\n self.embed_inv_d_tr_timestep = nn.Embedding(self.inv_d_tr_history_len, hidden_dim)\n\n self.embed_obs_tr_ln = nn.LayerNorm(hidden_dim)\n self.embed_inv_d_ln = nn.LayerNorm(hidden_dim)\n\n self.n_obs_tr_components = 2 # namely: target returns and image observations\n # One extra position is for the context embedding.\n n_obs_tr_positions = 1 + self.obs_tr_history_len * self.n_obs_tr_components\n obs_tr_config = transformers.GPT2Config(\n vocab_size=1, # doesn't matter -- we don't use the vocab\n n_positions=n_obs_tr_positions,\n n_ctx=n_obs_tr_positions,\n n_embd=hidden_dim,\n **obs_pred_gpt2_kwargs\n )\n self.obs_transformer = GPT2Model(obs_tr_config)\n\n self.n_inv_d_tr_components = 3 # namely: integrated observations (image obs. 
embeddings + proprios combined), image obs predictions, and actions\n n_inv_d_transformer_positions = self.inv_d_tr_history_len * self.n_inv_d_tr_components\n inv_d_transformer_config = transformers.GPT2Config(\n vocab_size=1, # doesn't matter -- we don't use the vocab\n n_positions=n_inv_d_transformer_positions,\n n_ctx=n_inv_d_transformer_positions,\n n_embd=hidden_dim,\n **inv_d_pred_gpt2_kwargs\n )\n self.inv_d_transformer = GPT2Model(inv_d_transformer_config)\n\n self.future_step = future_step\n\n # NOTE: currently, using the Gaussian head-based stochastic prediction of observation latents doesn't work very well.\n # Therefore, we'll use deterministic prediction of observation latents instead.\n self.deterministic_future_obs_emb_predictions = True\n if not self.deterministic_future_obs_emb_predictions:\n self.predict_future = GaussianHead(\n input_dim=hidden_dim, output_dim=hidden_dim,\n std_bounds=self.std_bounds,\n hidden_dim=hidden_dim\n )\n\n\n def _get_tunables(self, image_encoder_tune_style='all', obs_pred_transformer_tune_style='all', inv_d_pred_transformer_tune_style='all'):\n tunables = super()._get_tunables(image_encoder_tune_style)\n\n #\n # Handle the tunables of the observation prediction transformer.\n #\n if not self.deterministic_future_obs_emb_predictions and obs_pred_transformer_tune_style != 'none':\n tunables.append(self.predict_future)\n\n if obs_pred_transformer_tune_style == 'all':\n tunables.extend([\n self.embed_obs_tr_ln,\n self.return_encoder,\n self.obs_transformer\n ])\n\n if self.impute_style == 'trainable':\n tunables.extend([\n getattr(self, f'missing_{x}_embedding') for x in [\n 'context', 'image', 'return'\n ]\n ])\n\n if not self.relative_position_encodings:\n tunables.append(self.embed_obs_tr_timestep) # Only for absolute position encodings.\n\n elif obs_pred_transformer_tune_style == 'last_block':\n # Fine-tune the last block of the transformer\n tunables.extend([\n self.obs_transformer.h[-1],\n self.obs_transformer.ln_f\n ])\n elif obs_pred_transformer_tune_style == 'linear_probe':\n # Only tune the predict_* networks\n pass\n elif obs_pred_transformer_tune_style == 'none':\n # Tune nothing -- no parameters got included\n pass\n else:\n raise ValueError(f'Invalid transformer_tune_style: {obs_pred_transformer_tune_style}')\n\n #\n # Handle the tunables of the inverse dynamics prediction transformer.\n #\n if inv_d_pred_transformer_tune_style != 'none':\n tunables.append(self.predict_action)\n\n if inv_d_pred_transformer_tune_style == 'all':\n tunables.extend([\n self.embed_inv_d_ln,\n self.proprio_encoder,\n self.action_encoder,\n self.image_and_proprio_emb_combiner,\n self.inv_d_transformer\n ])\n\n if self.impute_style == 'trainable':\n tunables.extend([\n getattr(self, f'missing_{x}_embedding') for x in [\n 'proprio', 'action'\n ]\n ])\n\n if not self.relative_position_encodings:\n tunables.append(self.embed_inv_d_tr_timestep) # Only for absolute position encodings.\n\n elif inv_d_pred_transformer_tune_style == 'last_block':\n # Fine-tune the last block of the transformer\n tunables.extend([\n self.inv_d_transformer.h[-1],\n self.inv_d_transformer.ln_f\n ])\n elif inv_d_pred_transformer_tune_style == 'linear_probe':\n # Only tune the predict_* networks\n pass\n elif inv_d_pred_transformer_tune_style == 'none':\n # Tune nothing -- no parameters got included\n pass\n else:\n raise ValueError(f'Invalid transformer_tune_style: {inv_d_pred_transformer_tune_style}')\n\n return tunables\n\n\n def _stack_inputs_and_masks(self, n_tr_input_components, 
inputs, mask, seq_length, batch_size, hidden_dim):\n assert len(inputs) == n_tr_input_components\n total_seq_length = len(inputs) * seq_length\n stacked_inputs = torch.stack(inputs, dim=1)\\\n .permute(0, 2, 1, 3)\\\n .reshape(batch_size, total_seq_length, hidden_dim) # [B, N-1, NS]\n\n # To make the attention mask fit the stacked inputs, have to stack it as well\n stacked_mask = torch.stack(\n [mask for _ in range(len(inputs))], dim=1\n ).permute(0, 2, 1).reshape(batch_size, total_seq_length)\n return stacked_inputs, stacked_mask\n\n\n def _predict_obs(self, context_embeddings, returns_embeddings, current_image_obs_embeddings, mask, seq_length, batch_size):\n stacked_obs_tr_inputs, stacked_obs_tr_mask = self._stack_inputs_and_masks(self.n_obs_tr_components,\n [returns_embeddings, current_image_obs_embeddings],\n mask,\n seq_length,\n batch_size,\n self.hidden_dim)\n # Account for context conditioning for the observation prediction transformer\n stacked_obs_tr_inputs = torch.cat([\n context_embeddings.unsqueeze(1),\n stacked_obs_tr_inputs\n ], dim=1) # [B, N, NS]\n stacked_obs_tr_inputs = self.embed_obs_tr_ln(stacked_obs_tr_inputs) # [B, N, NS]\n\n stacked_obs_tr_mask = torch.cat([\n torch.ones(batch_size, 1, device=stacked_obs_tr_mask.device),\n stacked_obs_tr_mask\n ], dim=1)\n\n # We feed the input embeddings (not word indices as in NLP) to the observation prediciton model.\n obs_tr_outputs = self.obs_transformer(\n inputs_embeds=stacked_obs_tr_inputs,\n attention_mask=stacked_obs_tr_mask\n )\n x_obs_tr = obs_tr_outputs['last_hidden_state']\n\n # Ignore first hidden state (corresponding to context)\n x_obs_tr = x_obs_tr[:,1:,:]\n\n # reshape x so that the second dimension corresponds to the original\n # returns-to-go (0), or observations (1); i.e. x[:,1,t] is the token for s_t\n x_obs_tr = x_obs_tr.reshape(batch_size, seq_length, self.n_obs_tr_components, self.hidden_dim).permute(0, 2, 1, 3)\n\n # Get predictions\n\n # For each time step, the observation prediction transformer outputs two latent states:\n # the first for return-to-go, the other for the state distribution parameters.\n predicted_obs_pos_idx = self.n_obs_tr_components - 1\n if not self.deterministic_future_obs_emb_predictions:\n future_image_obs_emb_distr = self.predict_future(x_obs_tr[:,predicted_obs_pos_idx])\n pred_future_image_obs_embeddings = future_image_obs_emb_distr.rsample()\n else:\n future_image_obs_emb_distr = None\n pred_future_image_obs_embeddings = x_obs_tr[:,predicted_obs_pos_idx]\n\n return pred_future_image_obs_embeddings, future_image_obs_emb_distr\n\n\n def _predict_actions(self, integrated_obs_embeddings, future_image_obs_emb, action_embeddings, mask, seq_length, batch_size):\n stacked_inv_d_inputs, stacked_inv_d_mask = self._stack_inputs_and_masks(self.n_inv_d_tr_components,\n [integrated_obs_embeddings, future_image_obs_emb, action_embeddings],\n mask,\n seq_length,\n batch_size,\n self.hidden_dim)\n\n inv_d_tr_outputs = self.inv_d_transformer(\n inputs_embeds=stacked_inv_d_inputs,\n attention_mask=stacked_inv_d_mask\n )\n x_inv_d_tr = inv_d_tr_outputs['last_hidden_state']\n\n # reshape x so that the second dimension corresponds to the original\n # observations (0), or actions (1); i.e. 
x[:,0,t] is the token for s_t\n x_inv_d_tr = x_inv_d_tr.reshape(batch_size, seq_length, self.n_inv_d_tr_components, self.hidden_dim).permute(0, 2, 1, 3)\n\n # For each time step, the inverse dynamics prediction transformer outputs three latent states, the last of which corresponds\n # to the action (see the call to self._stack_inputs_and_masks above). We want to predict that last component using all the data\n # that comes before it.\n predicted_action_pos_idx = self.n_inv_d_tr_components - 2\n pred_future_pred_actions = self.predict_action(x_inv_d_tr[:,predicted_action_pos_idx])\n return pred_future_pred_actions\n\n\n def forward(self, context, images, proprios, actions, rewards, returns_to_go, timesteps, mask, compute_pred_obs=True, compute_pred_future_actions=True, compute_known_future_actions=False, eval_mode=False):\n batch_dims = images[self.camera_names[0]].shape[:2]\n obs_tr_batch_size, seq_length = batch_dims\n batch_increase_ratio = self.obs_tr_history_len // self.inv_d_tr_history_len\n inv_d_batch_size = obs_tr_batch_size * batch_increase_ratio\n\n # NOTE: During training, the length of trajectory sequences that are fed to this method is (obs_pred.K + lookahead).\n # During evaluation, it is just obs_pred.K. So, we need to let this method's logic know about this, as below.\n if eval_mode:\n k = 0\n else:\n k = self.future_step\n seq_length -= k\n assert seq_length == self.obs_tr_history_len\n\n #\n # ******* STEP 1: Embed all the inputs to the model. *******\n #\n\n image_obs_embeddings = self.embed_image_observations({f'{cam}_image': images[cam] for cam in images.keys()}, batch_dims)\n prop_embeddings = self.embed_proprio(proprios, batch_dims)\n integrated_obs_embeddings = self.image_and_proprio_emb_combiner(torch.cat([image_obs_embeddings, prop_embeddings], dim=-1))\n action_embeddings = self.embed_action(actions, batch_dims)\n returns_embeddings = self.embed_return(returns_to_go, batch_dims)\n\n # Save for later\n orig_image_obs_embeddings = image_obs_embeddings[:,k:].detach()\n\n passthrough_current_image_obs_embeddings = image_obs_embeddings[:,:self.obs_tr_history_len]\n stopgrad_current_image_obs_embeddings = image_obs_embeddings[:,:self.obs_tr_history_len].detach()\n\n known_future_image_obs_embeddings = image_obs_embeddings[:,k:].reshape(inv_d_batch_size, self.inv_d_tr_history_len, self.hidden_dim)\n\n image_obs_embeddings = image_obs_embeddings[:,:self.obs_tr_history_len]\n prop_embeddings = prop_embeddings[:,:self.obs_tr_history_len]\n returns_embeddings = returns_embeddings[:,:self.obs_tr_history_len]\n integrated_obs_embeddings = integrated_obs_embeddings[:,:self.obs_tr_history_len].reshape(inv_d_batch_size, self.inv_d_tr_history_len, self.hidden_dim)\n action_embeddings = action_embeddings[:,:self.obs_tr_history_len].reshape(inv_d_batch_size, self.inv_d_tr_history_len, self.hidden_dim)\n\n # Masks for each model\n mask_prefix = mask[:,:self.obs_tr_history_len]\n inv_d_mask = mask_prefix.reshape(inv_d_batch_size, self.inv_d_tr_history_len)\n\n assert np.prod(passthrough_current_image_obs_embeddings.shape) == np.prod(known_future_image_obs_embeddings.shape)\n assert np.prod(stopgrad_current_image_obs_embeddings.shape) == np.prod(known_future_image_obs_embeddings.shape)\n\n if not self.relative_position_encodings:\n # Shift embeddings by position embedding\n # Obs. 
prediction and inverse dynamics prediction transformers potentially have their own position embeddings\n position_embeddings_for_obs_tr = self.embed_obs_tr_timestep(\n torch.arange(self.obs_tr_history_len, device=self.embed_obs_tr_timestep.weight.device))\n position_embeddings_for_obs_tr = torch.tile(position_embeddings_for_obs_tr, (obs_tr_batch_size, 1, 1))\n\n # Image obs. embeddings and returns will be fed only into the obs. prediction transformer.\n passthrough_current_image_obs_embeddings = passthrough_current_image_obs_embeddings.to(position_embeddings_for_obs_tr.device) + position_embeddings_for_obs_tr\n stopgrad_current_image_obs_embeddings = stopgrad_current_image_obs_embeddings.to(position_embeddings_for_obs_tr.device) + position_embeddings_for_obs_tr\n returns_embeddings = returns_embeddings.to(position_embeddings_for_obs_tr.device) + position_embeddings_for_obs_tr\n\n position_embeddings_for_inv_d_tr = self.embed_inv_d_tr_timestep(\n torch.arange(self.inv_d_tr_history_len, device=self.embed_inv_d_tr_timestep.weight.device))\n position_embeddings_for_inv_d_tr = torch.tile(position_embeddings_for_inv_d_tr, (inv_d_batch_size, 1, 1))\n\n # Integrated observations and actions will be fed only into the inv.d. transformer\n integrated_obs_embeddings = integrated_obs_embeddings.to(position_embeddings_for_inv_d_tr.device) + position_embeddings_for_inv_d_tr\n # NOTE: the future image observation embeddings aren't integrated with proprios, because predicting inverse dynamics from known current\n # and future proprio would be too easy and woudn't need to rely on the future image observation embeddings.\n known_future_image_obs_embeddings = known_future_image_obs_embeddings.to(position_embeddings_for_inv_d_tr.device) + position_embeddings_for_inv_d_tr\n action_embeddings = action_embeddings.to(position_embeddings_for_inv_d_tr.device) + position_embeddings_for_inv_d_tr\n\n #\n # ******* STEP 2: Use the observation prediction transformer to predict the observation embeddings. *******\n #\n\n # NOTE: this prediction makes sense only for trajectories with a task/context, since without one it's impossible to\n # reasonably predict the next observation. But we compute the predictions anyway and let the compute_losses(.) method ignore\n # these predictions during loss computation if needed.\n\n # For the obs. 
prediction transformer, we make the sequence look like (C, R_1, o_1, R_2, o_2, ...)\n if (compute_pred_future_actions and (actions is not None) and (context is not None)) or (compute_pred_obs and (context is not None)):\n context_embeddings = self.embed_context({f'{cam}_image': context[cam] for cam in context.keys()} if context is not None else None, batch_dims)\n passthrough_context_embeddings = context_embeddings\n stopgrad_context_embeddings = context_embeddings.detach()\n pred_future_image_obs_embeddings_from_passthrough_obs, _ = self._predict_obs(passthrough_context_embeddings, returns_embeddings, passthrough_current_image_obs_embeddings, mask_prefix, self.obs_tr_history_len, obs_tr_batch_size)\n pred_future_image_obs_embeddings_from_stopgrad_obs, future_image_obs_emb_distr_from_stopgrad_obs = self._predict_obs(stopgrad_context_embeddings, returns_embeddings, stopgrad_current_image_obs_embeddings, mask_prefix, self.obs_tr_history_len, obs_tr_batch_size)\n\n else:\n pred_future_image_obs_embeddings_from_passthrough_obs = None\n pred_future_image_obs_embeddings_from_stopgrad_obs = None\n future_image_obs_emb_distr_from_stopgrad_obs = None\n\n #\n # ******* STEP 3: Predict inverse dynamics, possibly in two ways. *******\n #\n\n # For the inv. dynamics prediction transformer, we make the sequence look like (int_o_1, pred_img_o_2, a_1, int_o_2, pred_img_o_3, a_2, ...)\n # Here, int_o_X are the embeddings of combined image-proprio observations, and pred_img_o_(X+1) are the predicted embeddings\n # of the next image observation. During learning, latter can be obtained either from STEP 2 or from the image_obs_embeddings array\n # *shifted by 1 position*. In this case, Presumably, the original image observation sequence contains 1 more entry than the action array.\n #\n # NOTE that the sequence doesn't contain a task specification C, since inverse dynamics should be task-agnostic.\n #\n # NOTE: We drop the last element of each input sequence before reshaping the inputs and passing them to the\n # inverse dynamics transformer. This is because the last action in each input sequence can't be predicted,\n # reliably, since we don't have the ground truth for the following observation, we omit this action from the\n # sequence.\n\n # NOTE: perhaps we shouldn't include predicted observations into the history (shaping the input as (int_o_1, pred_img_o_2, a_1, int_o_2, pred_img_o_3, a_2, ... ) includes them).\n # It makes the history long for no good reason (just due to including past predictions, which don't add any information), potentially making the model the model \"used to\" the\n # fact that predictions carry no extra info and making it largely ignore the prediction of the latest observation latent, which is actually crucial for making the correct action prediction.\n #\n if compute_pred_future_actions and (actions is not None):\n # If compute_pred_future_actions, this means we are doing inference. 
At inference/execution time, we don't have future observations\n # available to us, and therefore *must* rely on those predicted in STEP 2.\n assert pred_future_image_obs_embeddings_from_passthrough_obs is not None\n pred_future_image_obs_embeddings_from_passthrough_obs = pred_future_image_obs_embeddings_from_passthrough_obs.reshape(inv_d_batch_size, self.inv_d_tr_history_len, self.hidden_dim)\n # Remember to add position encodings as appropriate\n if not self.relative_position_encodings:\n pred_future_image_obs_embeddings_from_passthrough_obs + position_embeddings_for_inv_d_tr\n\n pred_future_pred_actions = self._predict_actions(integrated_obs_embeddings,\n ### For passing zeros instead of target vector\n #torch.zeros_like(pred_future_image_obs_embeddings_from_passthrough_obs),\n ### For passing goal instead of target vector\n #torch.tile(passthrough_context_embeddings, (30, 1, 1)).reshape(pred_future_image_obs_embeddings_from_passthrough_obs.shape),\n pred_future_image_obs_embeddings_from_passthrough_obs,\n action_embeddings,\n inv_d_mask,\n self.inv_d_tr_history_len,\n inv_d_batch_size)\n else:\n pred_future_pred_actions = None\n\n if compute_known_future_actions and (actions is not None):\n # If compute_loss, then we are doing learning. During learning, we know the actual future observation for each step in\n # the training trajectories, so we can use it to infer the actions.\n known_future_pred_actions = self._predict_actions(integrated_obs_embeddings,\n known_future_image_obs_embeddings,\n action_embeddings,\n inv_d_mask,\n self.inv_d_tr_history_len,\n inv_d_batch_size)\n else:\n known_future_pred_actions = None\n\n return (\n pred_future_pred_actions,\n known_future_pred_actions,\n orig_image_obs_embeddings,\n (future_image_obs_emb_distr_from_stopgrad_obs if not self.deterministic_future_obs_emb_predictions else pred_future_image_obs_embeddings_from_stopgrad_obs)\n )\n\n\n def compute_losses(self, forward_outputs, actions, contextual, mask):\n # Include superclass's losses\n losses = super().compute_losses(forward_outputs, actions, contextual, mask)\n\n # Unpack model outputs into local vars\n pred_future_action_preds, grounded_action_preds, target_obs_embeddings, future_obs_distr_from_stopgrad_obs = forward_outputs\n\n batch_size, actual_seq_length = target_obs_embeddings.shape[:2]\n assert actual_seq_length == self.obs_tr_history_len\n obs_mask = mask[:,:self.obs_tr_history_len]\n\n if actions is not None:\n target_actions = actions[:,:self.obs_tr_history_len]\n if grounded_action_preds is not None:\n mask__reshaped_for_predictions = obs_mask.reshape(grounded_action_preds.shape[0], -1)\n target_actions__reshaped_for_predictions = target_actions.reshape(grounded_action_preds.shape[0], grounded_action_preds.shape[1], -1)\n losses['grounded_inverse_dynamics'] = _action_loss(grounded_action_preds,\n target_actions__reshaped_for_predictions,\n mask__reshaped_for_predictions)\n if contextual and pred_future_action_preds is not None:\n # Action prediction based on predicted observations makes sense only for contextual trajectories\n # because without a context/task, observations can't be reasonably predicted.\n mask__reshaped_for_predictions = obs_mask.reshape(pred_future_action_preds.shape[0], -1)\n target_actions__reshaped_for_predictions = target_actions.reshape(pred_future_action_preds.shape[0], pred_future_action_preds.shape[1], -1)\n\n if pred_future_action_preds is not None:\n losses['predicted_inverse_dynamics'] = _action_loss(pred_future_action_preds,\n 
target_actions__reshaped_for_predictions,\n mask__reshaped_for_predictions)\n\n # Predict embedding k steps into the future.\n #\n # As with inverse dynamics computation based on predicted observations, observation prediction loss itself makes sense\n # only for contextual trajectories.\n if contextual:\n future_mask = obs_mask.bool()\n # NOTE: Here, we stop-grad the computed observation embeddings so that backpropagation affects only\n # the observation embedding prediction model, not the observation encoders. If we allow observation\n # encoders to be updated as well, the observation embeddings may eventually collapse due to\n # updates on observation-only batches. On observation-action batches, the encoders get updated anyway\n # thanks to backpropagation from the inverse dynamics.\n\n if not self.deterministic_future_obs_emb_predictions:\n future_embeddings = target_obs_embeddings[future_mask].detach()\n sliced_future_distr = slice_dist(future_obs_distr_from_stopgrad_obs, (slice(batch_size), slice(self.obs_tr_history_len)))\n masked_future_distr = slice_dist(sliced_future_distr, future_mask)\n future_log_probs = masked_future_distr.log_prob(future_embeddings)\n losses['future_prediction'] = -future_log_probs.mean()\n else:\n future_embeddings = target_obs_embeddings.detach()\n unmasked_losses = torch.mean((future_obs_distr_from_stopgrad_obs - future_embeddings)**2, dim=-1)\n assert unmasked_losses.shape == future_mask.shape\n selected_losses = unmasked_losses[future_mask]\n losses['future_prediction'] = selected_losses.mean()\n\n return losses" }, { "identifier": "MLPBCModel", "path": "PLEX/models/trajectory_models/mlp_bc.py", "snippet": "class MLPBCModel(TrajectoryModel):\n\n \"\"\"\n Simple MLP that predicts next action a from past observations.\n \"\"\"\n\n def __init__(\n self,\n camera_names, obs_dims,\n proprio_dim, act_dim,\n hidden_dim,\n n_layer=3,\n activation_function='relu',\n dropout=0.1,\n **kwargs\n ):\n super().__init__(camera_names, obs_dims, proprio_dim, act_dim, hidden_dim, **kwargs)\n\n layers = []\n prev_dim = (1 + 2 * self.history_len) * hidden_dim\n for _ in range(n_layer):\n layers.extend([\n nn.Linear(prev_dim, hidden_dim),\n ACTIVATION_FUNCTIONS[activation_function](),\n nn.Dropout(dropout)\n ])\n prev_dim = hidden_dim\n layers.extend([\n nn.Linear(prev_dim, act_dim),\n nn.Tanh()\n ])\n self.mlp = nn.Sequential(*layers)\n\n def _get_tunables(self, image_encoder_tune_style='all'):\n tunables = super()._get_tunables(image_encoder_tune_style)\n tunables.append(self.mlp)\n return tunables\n\n def forward(self, context, images, proprios, actions, rewards, returns_to_go, timesteps=None, attention_mask=None):\n batch_dims = images[self.camera_names[0]].shape[:2]\n\n context_embeddings = self.embed_context({f'{cam}_image': context[cam] for cam in context.keys()} if context is not None else None, batch_dims)\n obs_embeddings = self.embed_observations({f'{cam}_image': images[cam] for cam in images.keys()}, proprios, batch_dims)\n action_embeddings = self.embed_action(actions, batch_dims)\n # returns_embeddings = self.embed_return(returns_to_go, batch_dims)\n\n context_embeddings = torch.unsqueeze(context_embeddings, dim=1)\n concatenated = torch.cat([context_embeddings, obs_embeddings, action_embeddings], dim=1)\n flattened = torch.flatten(concatenated, start_dim=1)\n action_preds = self.mlp(flattened)\n # get_action expects forward rteval to have a sequence dimension\n action_preds = torch.unsqueeze(action_preds, dim=1)\n return action_preds,\n\n def 
compute_losses(self, forward_outputs, actions, contextual, mask):\n # Include superclass's losses\n losses = super().compute_losses(forward_outputs, actions, contextual, mask)\n action_preds, = forward_outputs\n action_targets = actions[:,-1:,:]\n assert action_preds.shape == action_targets.shape\n losses['action'] = F.mse_loss(action_preds, action_targets)\n return losses" }, { "identifier": "DecisionTransformer", "path": "PLEX/models/trajectory_models/decision_transformer.py", "snippet": "class DecisionTransformer(TrajectoryModel):\n def __init__(\n self,\n camera_names, obs_dims,\n proprio_dim, act_dim,\n hidden_dim,\n gpt2_kwargs={},\n **kwargs\n ):\n super().__init__(camera_names, obs_dims, proprio_dim, act_dim, hidden_dim, **kwargs)\n\n assert self.action_output_type == 'deterministic'\n layers = [nn.Linear(self.hidden_dim, self.act_dim)]\n if self.action_tanh:\n layers.append(nn.Tanh())\n self.predict_action = nn.Sequential(*layers)\n\n self.n_components = 3 # namely: return-to-go, observation, action\n n_positions = self.history_len * self.n_components\n config = transformers.GPT2Config(\n vocab_size=1, # doesn't matter -- we don't use the vocab\n n_positions=n_positions,\n n_ctx=n_positions,\n n_embd=hidden_dim,\n **gpt2_kwargs\n )\n\n # NOTE: the only differences between this GPT2Model and the default Huggingface version are:\n # -- GPT2Model doesn't have absolute position embeddings (since we'll add those ourselves if needed).\n # -- Our GPT2Model has the option of using relative position embeddings.\n self.transformer = GPT2Model(config)\n if not self.transformer.config.relative_position_encodings:\n self.embed_timestep = nn.Embedding(T_MAX, hidden_dim)\n self.embed_ln = nn.LayerNorm(hidden_dim)\n\n assert not self.bc_mode, \"The original DT should be used only in offline RL mode (with --orl_learning_mode)\"\n\n def _get_tunables(self, image_encoder_tune_style='all', transformer_tune_style='all'):\n tunables = super()._get_tunables(image_encoder_tune_style)\n\n # Transformer\n if transformer_tune_style == 'all':\n tunables.extend([\n self.embed_ln,\n # The observation (state or image encoder) is already handled by super()._get_tunables(.)\n self.return_encoder,\n self.action_encoder,\n self.transformer\n ])\n\n if not globals.full_state_mode:\n tunables.extend([self.proprio_encoder])\n\n # Fine-tune everything\n if not self.transformer.config.relative_position_encodings:\n tunables.append(self.embed_timestep) # Only for absolute position encodings.\n\n elif transformer_tune_style == 'last_block':\n # Fine-tune the last block of the transformer\n tunables.extend([\n self.transformer.h[-1],\n self.transformer.ln_f\n ])\n elif transformer_tune_style == 'linear_probe':\n # Only tune the predict_* networks\n pass\n else:\n raise ValueError(f'Invalid transformer_tune_style: {transformer_tune_style}')\n\n return tunables\n\n def forward(self, context, obs, proprios, actions, rewards, returns_to_go, timesteps, mask, compute_pred_obs=True, compute_pred_future_actions=True, compute_known_future_actions=False, eval_mode=False):\n # Ignore context\n context = None\n\n # embed each modality with a different head\n\n if not globals.full_state_mode:\n images = obs\n batch_dims = images[self.camera_names[0]].shape[:2]\n batch_size, seq_length = batch_dims\n\n # embed each modality with a different head\n obs_embeddings = self.embed_observations({f'{cam}_image': images[cam] for cam in images.keys()}, proprios, batch_dims)\n else:\n states = obs\n batch_dims = states.shape[:2]\n batch_size, 
seq_length = batch_dims\n obs_embeddings = self.embed_observations(states, proprios, batch_dims)\n\n action_embeddings = self.embed_action(actions, batch_dims)\n returns_embeddings = self.embed_return(returns_to_go, batch_dims)\n\n if not self.transformer.config.relative_position_encodings:\n # shift embeddings by position embedding\n position_embeddings = self.embed_timestep(timesteps)\n obs_embeddings = obs_embeddings.to(position_embeddings.device) + position_embeddings\n action_embeddings = action_embeddings.to(position_embeddings.device) + position_embeddings\n returns_embeddings = returns_embeddings.to(position_embeddings.device) + position_embeddings\n\n if mask is None:\n # attention mask for GPT: 1 if can be attended to, 0 if not\n mask = torch.ones((batch_size, seq_length), dtype=torch.long)\n\n # this makes the sequence look like (R_1, o_1, a_1, R_2, o_2, a_2, ...)\n # which works nice in an autoregressive sense since observations predict actions\n stacked_inputs = torch.stack(\n (returns_embeddings, obs_embeddings, action_embeddings), dim=1\n ).permute(0, 2, 1, 3).reshape(batch_size, self.n_components*seq_length, self.hidden_dim)\n stacked_inputs = self.embed_ln(stacked_inputs)\n\n # to make the attention mask fit the stacked inputs, have to stack it as well\n stacked_attention_mask = torch.stack(\n (mask, mask, mask), dim=1\n ).permute(0, 2, 1).reshape(batch_size, self.n_components*seq_length)\n\n # we feed in the input embeddings (not word indices as in NLP) to the model\n transformer_outputs = self.transformer(\n inputs_embeds=stacked_inputs,\n attention_mask=stacked_attention_mask,\n )\n x = transformer_outputs['last_hidden_state']\n\n # reshape x so that the second dimension corresponds to the original\n # returns (0), states (1), or actions (2); i.e. x[:,1,t] is the token for s_t\n x = x.reshape(batch_size, seq_length, self.n_components, self.hidden_dim).permute(0, 2, 1, 3)\n\n # We predict only actions, not (latent) states or returns, because:\n # - Actions is ultimately what we care about predicting.\n # - Since we are working with high-d. observations whose encoders are getting trained together with DT\n # itself, optimizing w.r.t. 
losses that are based on predicting latent states may lead to encoders'\n # latent state collapse.\n # - We are working with robotics scenarios, where rewards are sparse and give rise to returns that are\n # easy to predict, so optimizing return prediction may lead to overfitting.\n action_preds = self.predict_action(x[:,1]) # predict next action given state and return\n\n return action_preds,\n\n def compute_losses(self, forward_outputs, actions, contextual, mask):\n losses = super().compute_losses(forward_outputs, actions, contextual, mask)\n action_preds, = forward_outputs\n losses['action'] = _action_loss(action_preds, actions, mask)\n return losses" }, { "identifier": "Trainer", "path": "PLEX/training/trainer.py", "snippet": "class Trainer:\n def __init__(self, model, optimizer, batch_size, get_batch,\n target_frame_rate,\n pad_frame_gaps,\n loss_weights,\n scheduler=None, eval_fns=None):\n self.model = DataParallel(model)\n self.optimizer = optimizer\n self.get_batch = get_batch\n self.batch_size = batch_size\n self.target_frame_rate = target_frame_rate\n self.pad_frame_gaps = pad_frame_gaps\n self.loss_weights = loss_weights\n self.scheduler = scheduler\n self.eval_fns = [] if eval_fns is None else eval_fns\n self.diagnostics = defaultdict(list)\n\n self.start_time = time.time()\n\n def train_iteration(self, num_steps, iter_num=0, print_fn=None):\n if print_fn is not None:\n print('\\n\\n\\n')\n print_fn(('=' * 26) + f' ITERATION {iter_num} ' + ('=' * 26) + '\\n')\n\n logs = dict()\n\n train_start = time.time()\n self.model.train()\n for _ in trange(num_steps):\n self.train_step()\n if self.scheduler is not None:\n self.scheduler.step()\n\n torch.cuda.empty_cache()\n\n logs['time/training'] = time.time() - train_start\n eval_start = time.time()\n\n print('\\n')\n print_fn(('-' * 20) + f' ITERATION {iter_num} EVALUATION ' + ('-' * 20) + '\\n')\n for eval_fn in self.eval_fns:\n outputs = eval_fn(self.model, iter_num)\n for k, v in outputs.items():\n logs[f'evaluation/{k}'] = v\n\n logs['time/total'] = time.time() - self.start_time\n logs['time/evaluation'] = time.time() - eval_start\n\n for key, values in self.diagnostics.items():\n logs[f'{key}/mean'] = np.mean(values)\n logs[f'{key}/std'] = np.std(values)\n self.diagnostics.clear() # reset for next iteration\n\n if print_fn is not None:\n print('\\n')\n print_fn(('-' * 15) + f' ITERATION {iter_num} EVALUATION RESULTS ' + ('-' * 15) + '\\n')\n for k, v in sorted(logs.items()):\n print_fn(f'{k}: {v}')\n\n return logs\n\n def train_step(self):\n batch = self.get_batch(self.batch_size, self.target_frame_rate, self.pad_frame_gaps)\n context, images, proprios, actions, rewards, returns_to_go, timesteps, mask = batch\n\n kwargs = {}\n if 'grounded_inverse_dynamics' in self.loss_weights.keys():\n kwargs = {'compute_pred_obs' : self.loss_weights['future_prediction'] > 0,\n 'compute_pred_future_actions' : (context is not None) and (self.loss_weights['predicted_inverse_dynamics'] > 0),\n 'compute_known_future_actions' : self.loss_weights['grounded_inverse_dynamics'] > 0}\n\n model_outputs = self.model.forward(*batch, **kwargs)\n losses = self.model.compute_losses(model_outputs, actions,\n (context is not None),\n mask=mask)\n total_loss = sum(self.loss_weights[k] * losses[k] for k in losses.keys())\n self.optimizer.zero_grad()\n total_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), .25)\n self.optimizer.step()\n\n for k, v in losses.items():\n v = v.item()\n self.diagnostics[f'unscaled_loss/{k}'].append(v)\n 
self.diagnostics[f'scaled_loss/{k}'].append(self.loss_weights[k] * v)\n self.diagnostics['total_loss'].append(total_loss.item())\n\n return total_loss.item()" } ]
import numpy as np import torch import random import wandb import pickle import PLEX.util.globals as globals from collections import defaultdict from PLEX.util.log import setup_logging from PLEX.util.timer import Timer from PLEX.envs.environments import * from PLEX.models.trajectory_models.plex import PLEX from PLEX.models.trajectory_models.mlp_bc import MLPBCModel from PLEX.models.trajectory_models.decision_transformer import DecisionTransformer from PLEX.training.trainer import Trainer
11,877
env.close() return env.obs_dims, env.proprio_dim, env.action_dim elif example_task.dataset_type == 'metaworld': env = MetaWorldEnv(example_task, use_normalized_reward=False, full_state_mode=globals.full_state_mode, camera_name=camera_names[0]) env.close() return env.obs_dims, env.proprio_dim, env.action_dim elif example_task.dataset_type == 'd4rl': env = d4rlEnv(example_task, full_state_mode=globals.full_state_mode) env.close() return env.obs_dims, env.proprio_dim, env.action_dim elif example_task.dataset_type == 'bridge' or example_task.dataset_type == 'bridge-v2': obs_dims = (3, image_size, image_size) proprio_dim = 7 action_dim = 7 return obs_dims, proprio_dim, action_dim else: raise ValueError('Unknown dataset type: {}'.format(example_task.dataset_type)) def setup_model(cmdline_args, example_task, log, device, camera_names, modalities_to_mask, data_dir, bc_mode): obs_dims, proprio_dim, action_dim = get_robot_dims(example_task, camera_names, cmdline_args['image_size']) pretrained_state_dict = {} # Load pretrained weights, if applicable load_path = cmdline_args['load_path'] if load_path is not None: load_path = load_path.replace('--TARGET_ROBOT--', cmdline_args['robot']) log(f'Loading pretrained weights from {load_path}') pretrained_state_dict = torch.load(load_path) std_bounds = (cmdline_args['std_min'], cmdline_args['std_max']) tune_style_kwargs = {} tune_style_kwargs['image_encoder_tune_style'] = cmdline_args['image_encoder_tune_style'] if cmdline_args['model'] == 'PLEX': assert cmdline_args['obs_pred.K'] is not None assert cmdline_args['inv_d_pred.K'] is not None assert cmdline_args['obs_pred.K'] >= cmdline_args['inv_d_pred.K'] assert cmdline_args['obs_pred.K'] % cmdline_args['inv_d_pred.K'] == 0 obs_pred_gpt2_kwargs = dict( n_layer=cmdline_args['obs_pred.n_layer'], n_head=cmdline_args['obs_pred.n_head'], K=cmdline_args['obs_pred.K'], activation_function=cmdline_args['activation_function'], resid_pdrop=cmdline_args['dropout'], attn_pdrop=cmdline_args['dropout'] ) inv_d_pred_gpt2_kwargs = dict( n_layer=cmdline_args['inv_d_pred.n_layer'], n_head=cmdline_args['inv_d_pred.n_head'], K=cmdline_args['inv_d_pred.K'], activation_function=cmdline_args['activation_function'], resid_pdrop=cmdline_args['dropout'], attn_pdrop=cmdline_args['dropout'] ) model = PLEX( camera_names=camera_names, obs_dims=obs_dims, proprio_dim=proprio_dim, act_dim=action_dim, hidden_dim=cmdline_args['embed_dim'], # The history length for this model is always the observation prediction model's history length: history_len=cmdline_args['obs_pred.K'], image_encoder_arch=cmdline_args['image_encoder_arch'], image_encoder_load=cmdline_args['image_encoder_load'], use_random_crops=True, pool_type=cmdline_args['pool_type'], action_output_type=cmdline_args['action_output_type'], impute_style=cmdline_args['impute_style'], data_dir=data_dir, relative_position_encodings=cmdline_args['relative_position_encodings'], future_step=cmdline_args['future_step'], std_bounds=std_bounds, obs_pred_gpt2_kwargs=obs_pred_gpt2_kwargs, inv_d_pred_gpt2_kwargs=inv_d_pred_gpt2_kwargs, modalities_to_mask=modalities_to_mask, bc_mode=bc_mode ).to(device=device) # Record the tune style parameters tune_style_kwargs['obs_pred_transformer_tune_style'] = cmdline_args['obs_pred.transformer_tune_style'] tune_style_kwargs['inv_d_pred_transformer_tune_style'] = cmdline_args['inv_d_pred.transformer_tune_style'] elif cmdline_args['model'] == 'DT': # Configure the model gpt2_kwargs = dict( n_layer=cmdline_args['n_layer'], n_head=cmdline_args['n_head'], 
activation_function=cmdline_args['activation_function'], resid_pdrop=cmdline_args['dropout'], attn_pdrop=cmdline_args['dropout'], relative_position_encodings=cmdline_args['relative_position_encodings'] ) model = DecisionTransformer( camera_names=camera_names, obs_dims=obs_dims, proprio_dim=proprio_dim, act_dim=action_dim, hidden_dim=cmdline_args['embed_dim'], history_len=cmdline_args['K'], image_encoder_arch=cmdline_args['image_encoder_arch'], image_encoder_load=cmdline_args['image_encoder_load'], use_random_crops=True, pool_type=cmdline_args['pool_type'], action_output_type=cmdline_args['action_output_type'], impute_style=cmdline_args['impute_style'], data_dir=data_dir, gpt2_kwargs=gpt2_kwargs, std_bounds=std_bounds, modalities_to_mask=modalities_to_mask, bc_mode=bc_mode ).to(device=device) # Record the tune style parameters tune_style_kwargs['transformer_tune_style'] = cmdline_args['transformer_tune_style'] elif cmdline_args['model'] == 'MLP':
class TaskDescriptor: def __init__(self, task_descr_string): self.dataset_location = task_descr_string = task_descr_string.rstrip('/').lstrip('/ ') parts = task_descr_string.split('/') self.frame_rate = None assert parts[0] in {'robosuite', 'robomimic', 'libero', 'metaworld', 'bridge', 'bridge-v2', 'd4rl'} self.dataset_type = parts[0] assert self.dataset_type == 'bridge-v2' or self.dataset_type in TASK_NAMES, f"ERROR! {self.dataset_type} is not in dataset type-to-task names dict! Task descr string is {task_descr_string}." assert self.dataset_type == 'bridge-v2' or parts[1] in TASK_NAMES[self.dataset_type] self.name = parts[1] assert parts[2] in ROBOT_NAMES self.robot = parts[2] def parse_comma_sep_param_value(comma_sep_param_value_str): param_values = [param_value.strip() for param_value in comma_sep_param_value_str.split(',')] return param_values def parse_tasks(task_spec_str, robot=None, global_max_traj=None): if task_spec_str is None or task_spec_str == 'None': return [], [] task_specs = parse_comma_sep_param_value(task_spec_str) descriptors = [] max_trajs = [] for task_spec in task_specs: if task_spec.startswith('(') and task_spec.endswith(')'): task_spec, max_traj = [part.strip('(): ') for part in task_spec.split(':')] max_trajs.append(int(max_traj)) else: max_trajs.append(global_max_traj) if robot is None: task = task_spec else: # --TARGET_ROBOT-- is a reserved token that can't be used to name an actual robot. task = task_spec.replace('--TARGET_ROBOT--', robot) assert task != task_spec, 'Invalid task directory string: {}. Needs to contain the \"--TARGET_ROBOT--\" token'.format(task) descriptors.append(TaskDescriptor(task)) return descriptors, max_trajs # reward_type can be 'native', 'negative', 'random', 'zero', and 'sparse'. def construct_rewards(original_rewards, successes, reward_type): if reward_type == 'sparse': rewards = np.asarray([sparse_reward(r) for r in successes]) elif reward_type == 'native': rewards = original_rewards elif reward_type == 'negative': rewards = -original_rewards elif reward_type == 'zero': rewards = np.zeros_like(original_rewards) elif reward_type == 'random': rewards = np.random.rand(*original_rewards.shape) else: raise NotImplementedError return rewards def set_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def construct_data_dir_path(cmdline_args): data_dir = cmdline_args['data_dir'].replace('--TARGET_ROBOT--', cmdline_args['robot']) data_dir = Path(data_dir).expanduser() return data_dir def setup_essentials(cmdline_args): set_seed(cmdline_args['seed']) data_shuffling_rng = np.random.RandomState(cmdline_args['seed']) log = setup_logging(cmdline_args) device = cmdline_args.get('device', 'cuda') log_to_wandb = cmdline_args.get('log_to_wandb', False) timer = Timer(log) camera_names = parse_comma_sep_param_value(cmdline_args['camera_names']) # Very important! This sets up observation preprocessing (such as resizing images to a desired size and swapping their format from HWC to CWH) # that will be done by the robomimic library to specified observation types when these observations are loaded from robomimic's h5py files or # generated by robosuite. if 'FULL_STATE' in camera_names: assert len(camera_names) == 1, "If FULL_STATE is present among camera names, it must be the only camera name." 
globals.full_state_mode = True else: globals.full_state_mode = False if not globals.full_state_mode: init_obs_preprocessing(camera_names, cmdline_args['image_size']) modalities_to_mask = parse_comma_sep_param_value(cmdline_args['modalities_to_mask']) data_dir = construct_data_dir_path(cmdline_args) common_env_metadata_dict = {'robosuite': None, 'metaworld': None, 'bridge': None} for modality in modalities_to_mask: assert modality in globals.MODALITIES return log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict def get_robot_dims(example_task, camera_names, image_size): if example_task.dataset_type == 'robosuite' or example_task.dataset_type == 'robomimic': env = RobosuiteEnv(example_task, use_normalized_reward=False, full_state_mode=globals.full_state_mode, camera_names=camera_names) env.close() return env.obs_dims, env.proprio_dim, env.action_dim elif example_task.dataset_type == 'metaworld': env = MetaWorldEnv(example_task, use_normalized_reward=False, full_state_mode=globals.full_state_mode, camera_name=camera_names[0]) env.close() return env.obs_dims, env.proprio_dim, env.action_dim elif example_task.dataset_type == 'd4rl': env = d4rlEnv(example_task, full_state_mode=globals.full_state_mode) env.close() return env.obs_dims, env.proprio_dim, env.action_dim elif example_task.dataset_type == 'bridge' or example_task.dataset_type == 'bridge-v2': obs_dims = (3, image_size, image_size) proprio_dim = 7 action_dim = 7 return obs_dims, proprio_dim, action_dim else: raise ValueError('Unknown dataset type: {}'.format(example_task.dataset_type)) def setup_model(cmdline_args, example_task, log, device, camera_names, modalities_to_mask, data_dir, bc_mode): obs_dims, proprio_dim, action_dim = get_robot_dims(example_task, camera_names, cmdline_args['image_size']) pretrained_state_dict = {} # Load pretrained weights, if applicable load_path = cmdline_args['load_path'] if load_path is not None: load_path = load_path.replace('--TARGET_ROBOT--', cmdline_args['robot']) log(f'Loading pretrained weights from {load_path}') pretrained_state_dict = torch.load(load_path) std_bounds = (cmdline_args['std_min'], cmdline_args['std_max']) tune_style_kwargs = {} tune_style_kwargs['image_encoder_tune_style'] = cmdline_args['image_encoder_tune_style'] if cmdline_args['model'] == 'PLEX': assert cmdline_args['obs_pred.K'] is not None assert cmdline_args['inv_d_pred.K'] is not None assert cmdline_args['obs_pred.K'] >= cmdline_args['inv_d_pred.K'] assert cmdline_args['obs_pred.K'] % cmdline_args['inv_d_pred.K'] == 0 obs_pred_gpt2_kwargs = dict( n_layer=cmdline_args['obs_pred.n_layer'], n_head=cmdline_args['obs_pred.n_head'], K=cmdline_args['obs_pred.K'], activation_function=cmdline_args['activation_function'], resid_pdrop=cmdline_args['dropout'], attn_pdrop=cmdline_args['dropout'] ) inv_d_pred_gpt2_kwargs = dict( n_layer=cmdline_args['inv_d_pred.n_layer'], n_head=cmdline_args['inv_d_pred.n_head'], K=cmdline_args['inv_d_pred.K'], activation_function=cmdline_args['activation_function'], resid_pdrop=cmdline_args['dropout'], attn_pdrop=cmdline_args['dropout'] ) model = PLEX( camera_names=camera_names, obs_dims=obs_dims, proprio_dim=proprio_dim, act_dim=action_dim, hidden_dim=cmdline_args['embed_dim'], # The history length for this model is always the observation prediction model's history length: history_len=cmdline_args['obs_pred.K'], image_encoder_arch=cmdline_args['image_encoder_arch'], image_encoder_load=cmdline_args['image_encoder_load'], 
use_random_crops=True, pool_type=cmdline_args['pool_type'], action_output_type=cmdline_args['action_output_type'], impute_style=cmdline_args['impute_style'], data_dir=data_dir, relative_position_encodings=cmdline_args['relative_position_encodings'], future_step=cmdline_args['future_step'], std_bounds=std_bounds, obs_pred_gpt2_kwargs=obs_pred_gpt2_kwargs, inv_d_pred_gpt2_kwargs=inv_d_pred_gpt2_kwargs, modalities_to_mask=modalities_to_mask, bc_mode=bc_mode ).to(device=device) # Record the tune style parameters tune_style_kwargs['obs_pred_transformer_tune_style'] = cmdline_args['obs_pred.transformer_tune_style'] tune_style_kwargs['inv_d_pred_transformer_tune_style'] = cmdline_args['inv_d_pred.transformer_tune_style'] elif cmdline_args['model'] == 'DT': # Configure the model gpt2_kwargs = dict( n_layer=cmdline_args['n_layer'], n_head=cmdline_args['n_head'], activation_function=cmdline_args['activation_function'], resid_pdrop=cmdline_args['dropout'], attn_pdrop=cmdline_args['dropout'], relative_position_encodings=cmdline_args['relative_position_encodings'] ) model = DecisionTransformer( camera_names=camera_names, obs_dims=obs_dims, proprio_dim=proprio_dim, act_dim=action_dim, hidden_dim=cmdline_args['embed_dim'], history_len=cmdline_args['K'], image_encoder_arch=cmdline_args['image_encoder_arch'], image_encoder_load=cmdline_args['image_encoder_load'], use_random_crops=True, pool_type=cmdline_args['pool_type'], action_output_type=cmdline_args['action_output_type'], impute_style=cmdline_args['impute_style'], data_dir=data_dir, gpt2_kwargs=gpt2_kwargs, std_bounds=std_bounds, modalities_to_mask=modalities_to_mask, bc_mode=bc_mode ).to(device=device) # Record the tune style parameters tune_style_kwargs['transformer_tune_style'] = cmdline_args['transformer_tune_style'] elif cmdline_args['model'] == 'MLP':
model = MLPBCModel(
3
2023-11-06 09:38:09+00:00
16k
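The microsoft/PLEX record above centers on PLEX/util/misc.py, whose TaskDescriptor and parse_tasks helpers turn comma-separated task specification strings (optionally wrapped as "(spec:max_traj)") into task descriptors plus per-task trajectory caps. The snippet below is an illustrative sketch, not part of the record: it assumes the helpers are importable from PLEX.util.misc as the record's file path suggests, and the robosuite task and robot names are placeholders that would have to appear in the repository's TASK_NAMES and ROBOT_NAMES tables.

# Hypothetical usage of the task-spec parsing shown in the record; the task and
# robot names below are placeholders assumed to exist in TASK_NAMES / ROBOT_NAMES.
from PLEX.util.misc import parse_tasks

descriptors, max_trajs = parse_tasks(
    "(robosuite/Door/--TARGET_ROBOT--:500), robosuite/Lift/--TARGET_ROBOT--",
    robot="Panda",          # substituted for the reserved --TARGET_ROBOT-- token
    global_max_traj=1000,   # cap applied to specs without an explicit (spec:N) limit
)

# The first spec carries its own cap of 500 trajectories; the second falls back to 1000.
assert max_trajs == [500, 1000]
for d in descriptors:
    print(d.dataset_type, d.name, d.robot)   # e.g. robosuite Door Panda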
Giftify-Bot/Giftify-Bot
bot.py
[ { "identifier": "GuildConfig", "path": "models/giveaway_settings.py", "snippet": "class GuildConfig:\n \"\"\"Represents the configuration settings for a guild.\n\n Parameters\n ----------\n guild: discord.Guild\n The guild associated with the configuration.\n logging: Optional[discord.TextChannel]\n The logging text channel for the guild.\n ping: Optional[discord.Role]\n The role to ping for notifications.\n reaction: str\n The reaction used for giveaways.\n participants_reaction,: str\n The reaction used for giveaways participants button.\n required_roles: List[discord.Role]\n The default roles required to join giveaway.\n blacklisted_roles: List[discord.Role]\n The default roles blacklisted from joining a giveaway.\n bypass_roles: List[discord.Role]\n The roles that bypass_roles certain restrictions.\n multiplier_roles: Dict[discord.Role, int]\n The multiplier_roles points assigned to each role.\n managers: List[discord.Role]\n The roles with manager permissions.\n dm_winner: bool\n Whether to send a direct message to the winner.\n dm_host: bool\n Whether to send a direct message to the host.\n channel_settings: List[ChannelConfig]\n The settings for each channel.\n color: discord.Colour\n The color used for messages.\n button_style: discord.ButtonStyle\n The style of the button.\n end_message: str\n The message sent when a giveaway ends.\n reroll_message: str\n The message sent when a giveaway rerolls.\n dm_message: str\n The direct message sent to winner.\n dm_host_message: str\n The direct message sent to host.\n gw_header: str\n The header for the giveaway message.\n gw_end_header: str\n The header for the giveaway end.\n \"\"\"\n\n __slots__: Tuple[str, ...] = (\n \"guild\",\n \"logging\",\n \"ping\",\n \"reaction\",\n \"participants_reaction\",\n \"required_roles\",\n \"blacklisted_roles\",\n \"bypass_roles\",\n \"multiplier_roles\",\n \"managers\",\n \"dm_winner\",\n \"dm_host\",\n \"channel_settings\",\n \"color\",\n \"button_style\",\n \"end_message\",\n \"reroll_message\",\n \"dm_message\",\n \"dm_host_message\",\n \"gw_header\",\n \"gw_end_header\",\n )\n\n def __init__(\n self,\n guild: discord.Guild,\n *,\n logging: Optional[discord.TextChannel],\n ping: Optional[discord.Role],\n reaction: str,\n participants_reaction: str,\n required_roles: List[discord.Role],\n blacklisted_roles: List[discord.Role],\n bypass_roles: List[discord.Role],\n multiplier_roles: Dict[discord.Role, int],\n managers: List[discord.Role],\n dm_winner: bool,\n dm_host: bool,\n channel_settings: List[ChannelConfig],\n color: discord.Colour,\n button_style: discord.ButtonStyle,\n end_message: str,\n reroll_message: str,\n dm_message: str,\n dm_host_message: str,\n gw_header: str,\n gw_end_header: str,\n ):\n self.guild = guild\n self.logging = logging\n self.ping = ping\n self.reaction = reaction\n self.participants_reaction = participants_reaction\n self.required_roles = required_roles\n self.blacklisted_roles = blacklisted_roles\n self.bypass_roles = bypass_roles\n self.multiplier_roles = multiplier_roles\n self.managers = managers\n self.dm_winner = dm_winner\n self.dm_host = dm_host\n self.channel_settings = channel_settings\n self.color = color\n self.button_style = button_style\n self.end_message = end_message\n self.reroll_message = reroll_message\n self.dm_host_message = dm_host_message\n self.dm_message = dm_message\n self.gw_header = gw_header\n self.gw_end_header = gw_end_header\n\n def __repr__(self):\n return f\"<GuildConfig guild={self.guild!r}>\"\n\n @staticmethod\n async def 
_create_config(guild_id: int, pool: asyncpg.Pool) -> asyncpg.Record:\n return await pool.fetchrow(\n \"INSERT INTO configs (guild) VALUES ($1) RETURNING *\",\n guild_id,\n )\n\n @classmethod\n def _from_data(\n cls,\n guild: discord.Guild,\n data: asyncpg.Record,\n channel_data: List[asyncpg.Record],\n ) -> \"GuildConfig\":\n data = dict(data)\n data[\"color\"] = discord.Colour(data[\"color\"])\n\n data[\"logging\"] = guild.get_channel(data[\"logging\"])\n data[\"ping\"] = guild.get_role(data[\"ping\"])\n data[\"required_roles\"] = [\n guild.get_role(role) for role in data[\"required_roles\"] if role is not None\n ]\n data[\"blacklisted_roles\"] = [\n guild.get_role(role)\n for role in data[\"blacklisted_roles\"]\n if role is not None\n ]\n data[\"bypass_roles\"] = [\n guild.get_role(role) for role in data[\"bypass_roles\"] if role is None\n ]\n data[\"multiplier_roles\"] = {\n guild.get_role(role): multiplier\n for role, multiplier in data[\"multiplier_roles\"].items()\n if role is not None and multiplier > 1\n }\n data[\"managers\"] = [\n guild.get_role(role) for role in data[\"managers\"] if role is not None\n ]\n\n data[\"button_style\"] = discord.utils.get(\n discord.ButtonStyle, value=data[\"button_style\"]\n )\n\n data[\"channel_settings\"] = [\n channel_setting\n for record in channel_data\n if (channel_setting := ChannelConfig.from_data(guild, record))\n ]\n\n data.pop(\"guild\") # We do not need this.\n\n return cls(guild, **data)\n\n def to_dict(self) -> GuildConfigData:\n \"\"\"Converts this GuildConfig object into a dict.\"\"\"\n\n data = GuildConfigData(\n guild=self.guild.id,\n reaction=self.reaction,\n participants_reaction=self.participants_reaction,\n required_roles=[\n role.id for role in self.required_roles if role is not None\n ],\n blacklisted_roles=[\n role.id for role in self.blacklisted_roles if role is not None\n ],\n bypass_roles=[role.id for role in self.bypass_roles if role is not None],\n multiplier_roles={\n role.id: multiplier_roles\n for role, multiplier_roles in self.multiplier_roles.items()\n if role is not None\n },\n managers=[role.id for role in self.managers if role is not None],\n dm_winner=self.dm_winner,\n dm_host=self.dm_host,\n color=int(self.color),\n button_style=self.button_style.value,\n end_message=self.end_message,\n reroll_message=self.reroll_message,\n dm_message=self.dm_message,\n dm_host_message=self.dm_host_message,\n gw_header=self.gw_header,\n gw_end_header=self.gw_end_header,\n ) # type: ignore\n if self.logging:\n data[\"logging\"] = self.logging.id\n if self.ping:\n data[\"ping\"] = self.ping.id\n return data\n\n @classmethod\n async def fetch(cls, guild: discord.Guild, pool: asyncpg.Pool) -> \"GuildConfig\":\n \"\"\"Create a GuildConfig instance from data retrieved from a database.\n\n Parameters\n ----------\n guild: discord.Guild\n The discord guild.\n pool: asyncpg.Pool\n The database connection pool.\n\n Returns\n -------\n GuildConfig\n An instance of GuildConfig populated with the retrieved data.\n \"\"\"\n\n data = await pool.fetchrow(\"SELECT * FROM configs WHERE guild = $1\", guild.id)\n channel_data: List[asyncpg.Record] = await pool.fetch(\n \"SELECT * FROM channel_configs WHERE guild = $1\", guild.id\n )\n\n if not data:\n data: asyncpg.Record = await cls._create_config(guild.id, pool)\n\n return cls._from_data(guild, data, channel_data)\n\n async def update(\n self, column: str, value: Any, pool: asyncpg.Pool\n ) -> \"GuildConfig\":\n \"\"\"Update the specified column with the provided value in the database.\n\n 
Parameters\n ----------\n column: str\n The column to be updated.\n value: Any\n The new value for the column.\n pool: asyncpg.Pool\n The database connection pool.\n\n Raises\n ------\n ValueError\n If the provided column is not a valid column name in `self.__slots__`.\n\n Returns\n -------\n GuildConfig\n The updated `GuildConfig` instance.\n \"\"\"\n if column not in self.__slots__:\n raise ValueError(f\"Invalid column: {column}\")\n\n setattr(self, column, value)\n\n data = self.to_dict()\n\n columns = \", \".join(data.keys())\n placeholders = \", \".join([f\"${i+1}\" for i in range(len(data))])\n update_clause = \", \".join(\n [f\"{key} = EXCLUDED.{key}\" for key in data.keys() if key != \"guild\"]\n )\n\n query = f\"\"\"\n INSERT INTO configs ({columns}) \n VALUES ({placeholders})\n ON CONFLICT (guild) DO \n UPDATE SET {update_clause}\n \"\"\"\n\n values = list(data.values())\n await pool.execute(query, *values)\n return self\n\n @overload\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = True,\n pool: Optional[asyncpg.Pool] = None,\n ) -> ChannelConfig:\n ...\n\n @overload\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = False,\n pool: Optional[asyncpg.Pool] = None,\n ) -> Optional[ChannelConfig]:\n ...\n\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = True,\n pool: Optional[asyncpg.Pool] = None,\n ) -> Optional[ChannelConfig]:\n \"\"\"\n Retrieves the configuration for a specific channel.\n\n Parameters\n ----------\n channel: Union[discord.TextChannel, discord.CategoryChannel]\n The channel for which to retrieve the configuration.\n create_if_not_exists: Optional[bool]\n Whether to create a new configuration if it doesn't exist. 
Default is True.\n pool: Optional[asyncpg.Pool]\n The connection pool for interacting with the database.\n\n Returns\n -------\n Optional[ChannelConfig]\n The ChannelConfig object if it exists, or None if it doesn't exist and create_if_not_exists is set to False.\n\n Raises\n ------\n MaxChannelConfigCreationError\n If create_if_not_exists is True and the maximum number of channel configurations has already been reached.\n \"\"\"\n\n config = discord.utils.get(self.channel_settings, channel=channel)\n if config is not None:\n return config\n\n if create_if_not_exists:\n if len(self.channel_settings) >= 25:\n raise MaxChannelConfigCreationError()\n else:\n if pool:\n config = await ChannelConfig.create(channel.guild, channel, pool)\n self.channel_settings.append(config)\n return config\n\n return None" }, { "identifier": "Giveaway", "path": "models/giveaways.py", "snippet": "class Giveaway:\n \"\"\"\n Represents a giveaway object.\n\n Attributes\n ----------\n bot: Giftify\n The bot instance to handle the giveaway.\n guild_id: int\n The ID of the guild (server) where the giveaway is hosted.\n channel_id: int\n The ID of the channel where the giveaway is hosted.\n message_id: int\n The ID of the giveaway message.\n extra_message_id: int\n The ID of the extra message with giveaway.\n host_id: int\n The ID of the user hosting the giveaway.\n donor_id: int\n The ID of the user donating for the giveaway.\n prize: int\n The prize of the giveaway.\n winner_count: int\n The number of winners for the giveaway.\n winners: List[int]\n The winners of the giveaway.\n participants: List[int]\n The IDs participants for the giveaway.\n ended: bool\n Indicates whether the giveaway has ended.\n ends: datetime.datetime\n The timestamp when the giveaway will be ended.\n required_roles: List[int]\n The list of role IDs required to participate in the giveaway.\n blacklisted_roles: List[int]\n The list of role IDs excluded from participating in the giveaway.\n bypass_roles: List[int]\n The list of user IDs exempted from giveaway restrictions.\n multiplier_roles: Optional[dict]\n A dictionary containing multiplier_roles criteria for the giveaway.\n messages: Optional[dict]\n A dictionary containing message-based criteria for the giveaway.\n messages_required: Optional[int]\n The number of messages required to participate in the giveaway.\n allowed_message_channels: Optional[List[int]]\n The ID of the channels where the message count is tracked.\n amari: Optional[int]\n The required Amari XP to participate in the giveaway.\n weekly_amari: Optional[int]\n The required weekly Amari XP to participate in the giveaway.\n \"\"\"\n\n __slots__ = (\n \"bot\",\n \"guild_id\",\n \"channel_id\",\n \"message_id\",\n \"extra_message_id\",\n \"prize\",\n \"host_id\",\n \"donor_id\",\n \"winner_count\",\n \"winners\",\n \"participants\",\n \"ended\",\n \"ends\",\n \"required_roles\",\n \"blacklisted_roles\",\n \"bypass_roles\",\n \"multiplier_roles\",\n \"messages\",\n \"messages_required\",\n \"allowed_message_channels\",\n \"amari\",\n \"weekly_amari\",\n )\n\n def __init__(self, *, bot: Giftify, record: asyncpg.Record):\n self.bot = bot\n self.guild_id: int = record[\"guild\"]\n self.channel_id: int = record[\"channel\"]\n self.message_id: int = record[\"message\"]\n self.extra_message_id: int = record[\"extra_message\"]\n self.prize: str = record[\"prize\"]\n self.host_id: int = record[\"host\"]\n self.donor_id: Optional[int] = record[\"donor\"]\n self.winner_count: int = record[\"winner_count\"]\n self.winners: List[int] = 
record[\"winners\"]\n self.participants: List[int] = record[\"participants\"]\n self.ended: bool = record[\"ended\"]\n self.ends: datetime.datetime = record[\"ends\"]\n self.required_roles: List[int] = record[\"required_roles\"] or []\n self.blacklisted_roles: List[int] = record[\"blacklisted_roles\"] or []\n self.bypass_roles: List[int] = record[\"bypass_roles\"] or []\n self.multiplier_roles: Dict[int, int] = {\n int(role): entries\n for role, entries in record[\"multiplier_roles\"].items()\n if entries > 1\n }\n self.messages: Dict[int, int] = {\n int(member): messages for member, messages in record[\"messages\"].items()\n }\n self.messages_required: Optional[int] = record[\"messages_required\"]\n self.allowed_message_channels: Optional[List[int]] = record[\"messages_channel\"]\n self.amari: Optional[int] = record[\"amari\"]\n self.weekly_amari: Optional[int] = record[\"weekly_amari\"]\n\n def __eq__(self, other: \"Giveaway\") -> bool:\n try:\n return (\n self.guild_id == other.guild_id\n and self.channel_id == other.channel_id\n and self.message_id == other.message_id\n )\n except AttributeError:\n return False\n\n def __hash__(self) -> int:\n return hash((self.guild_id, self.channel_id, self.message_id))\n\n def __repr__(self) -> str:\n return f\"<Giveaway guild_id={self.guild_id} channel_id={self.channel_id} message_id={self.message_id}>\"\n\n @property\n def jump_to_giveaway(self) -> discord.ui.View:\n url = f\"https://discord.com/channels/{self.guild_id}/{self.channel_id}/{self.message_id}\"\n view = BaseView(timeout=None)\n button = discord.ui.Button(label=\"Jump To Giveaway\", url=url)\n view.add_item(button)\n return view\n\n @staticmethod\n def create_embed(\n interaction: Interaction,\n config: GuildConfig,\n duration: datetime.datetime,\n winners: int,\n prize: str,\n required_roles: Optional[List[discord.Role]] = None,\n blacklisted_roles: Optional[List[discord.Role]] = None,\n bypass_roles: Optional[List[discord.Role]] = None,\n multiplier_roles: Optional[Dict[discord.Role, int]] = None,\n messages_required: Optional[int] = None,\n allowed_message_channels: Optional[List[discord.TextChannel]] = None,\n amari: Optional[int] = None,\n weekly_amari: Optional[int] = None,\n donor: Optional[discord.Member] = None,\n ) -> discord.Embed:\n assert interaction.guild is not None\n\n description = f\"Click the {config.reaction} button to join the giveaway!\\n\"\n description += f\"Hosted By: {interaction.user.mention}\\n\"\n\n if donor:\n description += f\"Donor: {donor.mention}\\n\"\n\n description += f\"Ends: {discord.utils.format_dt(duration, style='R')} ({discord.utils.format_dt(duration, style='f')})\\n\"\n\n embed = discord.Embed(\n title=prize,\n description=description,\n colour=config.color,\n timestamp=duration,\n )\n embed.set_footer(\n text=f\"{winners} winner(s) • Ends\",\n icon_url=interaction.guild.icon or interaction.client.user.display_avatar,\n )\n requirements = \"\"\n if required_roles:\n requirements += f\"Required Roles: {', '.join(role.mention for role in required_roles if role is not None)}\\n\"\n if bypass_roles:\n requirements += f\"Bypass Roles: {', '.join(role.mention for role in bypass_roles if role is not None)}\\n\"\n\n if blacklisted_roles:\n requirements += f\"Blacklisted Roles: {', '.join(role.mention for role in blacklisted_roles if role is not None)}\\n\"\n if messages_required:\n requirements += (\n f\"Messages Required: **{messages_required}** message(s) (5s cooldown)\\n\"\n )\n if allowed_message_channels:\n requirements += f\"Allowed Channels: 
{', '.join(f'<#{c.id}>' for c in allowed_message_channels)}\\n\"\n\n if amari:\n requirements += f\"Amari Level: {amari}\\n\"\n if weekly_amari:\n requirements += f\"Weekly Amari: {weekly_amari} XP Points\\n\"\n\n if requirements:\n embed.add_field(name=\"Requirements\", value=requirements, inline=False)\n\n if multiplier_roles:\n multiplier_roles_mention = \"\\n\".join(\n [\n f\"- {entry}x ・ {role.mention}\"\n for role, entry in multiplier_roles.items()\n if role is not None\n ]\n )\n embed.add_field(\n name=\"Bonus Entries\", value=multiplier_roles_mention, inline=False\n )\n\n return embed\n\n @classmethod\n async def start(\n cls,\n interaction: Interaction,\n duration: datetime.datetime,\n winners: int,\n prize: str,\n config: GuildConfig,\n channel_config: Optional[ChannelConfig],\n required_roles: Optional[List[discord.Role]] = None,\n blacklisted_roles: Optional[List[discord.Role]] = None,\n bypass_roles: Optional[List[discord.Role]] = None,\n multiplier_roles: Optional[Dict[discord.Role, int]] = None,\n messages_required: Optional[int] = None,\n allowed_message_channels: Optional[List[discord.TextChannel]] = None,\n amari: Optional[int] = None,\n weekly_amari: Optional[int] = None,\n image: Optional[discord.Attachment] = None,\n donor: Optional[discord.Member] = None,\n ping: bool = False,\n message: Optional[str] = None,\n ):\n assert isinstance(interaction.channel, discord.TextChannel)\n assert interaction.guild is not None\n\n embed = cls.create_embed(\n interaction=interaction,\n config=config,\n duration=duration,\n winners=winners,\n prize=prize,\n required_roles=required_roles,\n blacklisted_roles=blacklisted_roles,\n bypass_roles=bypass_roles,\n multiplier_roles=multiplier_roles,\n messages_required=messages_required,\n allowed_message_channels=allowed_message_channels,\n amari=amari,\n weekly_amari=weekly_amari,\n donor=donor,\n )\n view = GiveawayView(\n config.reaction, config.participants_reaction, config.button_style\n )\n giveaway_message = await interaction.channel.send(\n config.gw_header, embed=embed, view=view\n )\n\n message_embed = discord.Embed(\n title=f\"{GIFT_EMOJI} Giveaway\",\n description=f\"**Message・** {message}\" if message else None,\n color=config.color,\n )\n\n if image:\n message_embed.set_image(url=image)\n\n extra_message = None\n\n if ping or image:\n ping_role = (\n channel_config.ping\n if channel_config and channel_config.ping\n else config.ping\n )\n extra_message = await interaction.channel.send(\n ping_role.mention if ping_role else \"\",\n embed=message_embed if message or image else None, # type: ignore\n allowed_mentions=discord.AllowedMentions(roles=True),\n )\n\n if extra_message is None and message is not None:\n extra_message = await interaction.channel.send(embed=message_embed)\n\n await interaction.client.timer_cog.create_timer(\n message_id=giveaway_message.id,\n channel_id=interaction.channel.id,\n guild_id=interaction.guild.id,\n author_id=interaction.user.id,\n title=\"Giveaway\",\n event=\"giveaway\",\n expires=duration,\n pool=interaction.client.pool,\n )\n\n return await cls.create_entry(\n bot=interaction.client,\n guild_id=interaction.guild.id,\n channel_id=interaction.channel.id,\n message_id=giveaway_message.id,\n prize=prize,\n host_id=interaction.user.id,\n donor_id=donor.id if donor else None,\n winner_count=winners,\n ends=duration,\n required_roles=[role.id for role in required_roles if role is not None]\n if required_roles\n else [],\n blacklisted_roles=[\n role.id for role in blacklisted_roles if role is not 
None\n ]\n if blacklisted_roles\n else [],\n bypass_roles=[role.id for role in bypass_roles if role is not None]\n if bypass_roles\n else [],\n multiplier_roles={\n role.id: entries\n for role, entries in multiplier_roles.items()\n if role is not None\n }\n if multiplier_roles\n else {},\n messages={},\n messages_required=messages_required,\n allowed_message_channels=[c.id for c in allowed_message_channels]\n if allowed_message_channels\n else [],\n extra_message_id=extra_message.id if extra_message else None,\n amari=amari,\n weekly_amari=weekly_amari,\n )\n\n @classmethod\n async def create_entry(\n cls,\n bot: Giftify,\n guild_id: int,\n channel_id: int,\n message_id: int,\n prize: str,\n host_id: int,\n winner_count: int,\n ends: datetime.datetime,\n required_roles: List[int],\n blacklisted_roles: List[int],\n bypass_roles: List[int],\n donor_id: Optional[int],\n multiplier_roles: Optional[dict],\n messages: Optional[dict],\n messages_required: Optional[int],\n allowed_message_channels: Optional[List[int]],\n extra_message_id: Optional[int],\n amari: Optional[int],\n weekly_amari: Optional[int],\n ) -> \"Giveaway\":\n \"\"\"\n Create a new Giveaway object and insert it into the database.\n\n Parameters\n ----------\n bot: Giftify\n The bot instance.\n guild_id: int\n The ID of the guild (server) where the giveaway is hosted.\n channel_id: int\n The ID of the channel where the giveaway is hosted.\n message_id: int\n The ID of the message having the giveaway view.\n prize: str\n The prize of the giveaway.\n host_id: int\n The ID of the user hosting the giveaway.\n donor_id: int\n The ID of the donor of the giveaway.\n winner_count: int\n The number of winners for the giveaway.\n ends: datetime.datetime\n The time when the giveaway ends.\n required_roles: List[int]\n The list of role IDs required to participate in the giveaway.\n blacklisted_roles: List[int]\n The list of role IDs excluded from participating in the giveaway.\n bypass_roles: List[int]\n The list of user IDs exempted from giveaway restrictions.\n multiplier_roles: Optional[dict]\n A dictionary containing multiplier_roles criteria for the giveaway.\n messages: Optional[dict]\n A dictionary containing message-based criteria for the giveaway.\n messages_required: Optional[int]\n The number of messages required to participate in the giveaway.\n allowed_message_channels: Optional[int]\n The ID of the channel where the message count is tracked.\n amari: Optional[int]\n The required Amari XP to participate in the giveaway.\n weekly_amari: Optional[int]\n The required weekly Amari XP to participate in the giveaway.\n\n Returns\n -------\n Giveaway\n The created Giveaway object.\n \"\"\"\n record = await bot.pool.fetchrow(\n \"INSERT INTO giveaways (guild, channel, message, extra_message, host, donor, prize, winner_count, ends, required_roles, blacklisted_roles, bypass_roles, multiplier_roles, messages, messages_required, messages_channel, amari, weekly_amari) \"\n \"VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18) \"\n \"RETURNING *\",\n guild_id,\n channel_id,\n message_id,\n extra_message_id,\n host_id,\n donor_id,\n prize,\n winner_count,\n ends,\n required_roles,\n blacklisted_roles,\n bypass_roles,\n multiplier_roles,\n messages,\n messages_required,\n allowed_message_channels,\n amari,\n weekly_amari,\n )\n return cls(bot=bot, record=record)\n\n async def check_requirements(self, member: discord.Member) -> None:\n missing_roles = [\n role.mention\n for role_id in self.required_roles\n 
if (role := member.guild.get_role(role_id)) and role not in member.roles\n ]\n if missing_roles:\n raise GiveawayError(\n f\"You cannot join this giveaway as you are missing the following required roles: {', '.join(missing_roles)}\"\n )\n\n blacklisted_roles = [\n role.mention\n for role_id in self.blacklisted_roles\n if (role := member.guild.get_role(role_id)) and role in member.roles\n ]\n if blacklisted_roles:\n raise GiveawayError(\n f\"You cannot join this giveaway as you have the following blacklisted roles: {', '.join(blacklisted_roles)}\"\n )\n\n if self.amari:\n if (user_level := await self.bot.fetch_level(member)) < self.amari:\n raise GiveawayError(\n f\"Your amari level is less than the required level, you need `{self.amari - user_level}` more level(s) to join the giveaway.\"\n )\n\n if self.weekly_amari:\n if (\n weekly_exp := await self.bot.fetch_weekly_experience(member)\n ) < self.weekly_amari:\n raise GiveawayError(\n f\"Your weekly amari experience is less than the required weekly amari experience, you need `{self.weekly_amari - weekly_exp}` more experience point(s) to join the giveaway.\"\n )\n\n if self.messages_required and self.messages_required > 0:\n if (\n user_messages := self.messages.get(member.id, 0)\n ) < self.messages_required:\n raise GiveawayError(\n f\"You have sent less messages than the required messages, you need to send `{self.messages_required - user_messages}` more messages to join the giveaway.\"\n )\n\n def can_bypass(self, member: discord.Member) -> bool:\n return any(\n member.guild.get_role(role_id) in member.roles\n for role_id in self.bypass_roles\n )\n\n def get_multiplier_entries(self, member: discord.Member) -> int:\n entries = 0\n for role_id, multiplier_roles_entries in self.multiplier_roles.items():\n if member.get_role(int(role_id)):\n entries += multiplier_roles_entries\n\n return entries or 1\n\n async def join(self, member: discord.Member) -> int:\n try:\n await self.check_requirements(member)\n except GiveawayError as error:\n if not self.can_bypass(member):\n raise error\n\n if member.id in self.participants:\n raise GiveawayError(\"You have already joined the giveaway.\")\n\n number_of_entries = self.get_multiplier_entries(member)\n entries = [member.id] * number_of_entries\n\n self.participants += entries\n\n query = \"\"\"UPDATE giveaways SET participants = $1 \n WHERE guild = $2 AND channel = $3 AND message = $4\"\"\"\n\n await self.bot.pool.execute(\n query, self.participants, self.guild_id, self.channel_id, self.message_id\n )\n\n return len(set(self.participants))\n\n async def leave(self, member: discord.Member) -> int:\n if member.id not in self.participants:\n raise GiveawayError(\"You are not a participant of this giveaway.\")\n\n self.participants = [\n participant for participant in self.participants if participant != member.id\n ]\n\n query = \"\"\"UPDATE giveaways SET participants = $1 \n WHERE guild = $2 AND channel = $3 AND message = $4\"\"\"\n\n await self.bot.pool.execute(\n query, self.participants, self.guild_id, self.channel_id, self.message_id\n )\n\n return len(set(self.participants))\n\n async def _end(self):\n await self.bot.pool.execute(\n \"UPDATE giveaways SET ended = $1, winners = $2 WHERE guild = $3 AND channel = $4 AND message = $5\",\n True,\n self.winners,\n self.guild_id,\n self.channel_id,\n self.message_id,\n )\n\n async def end(self):\n guild = self.bot.get_guild(self.guild_id)\n if not guild:\n return await self._end()\n\n config = await self.bot.fetch_config(guild)\n winners = await 
self.pick_winners(self.winner_count, guild)\n self.winners = [winner.id for winner in winners]\n\n await self._end()\n\n if config.dm_host:\n await self.dm_host(guild, winners, config.dm_host_message)\n\n if config.dm_winner:\n await self.dm_winners(config.dm_message, winners)\n\n channel = guild.get_channel(self.channel_id)\n if not channel or not isinstance(channel, discord.TextChannel):\n return\n\n gw_message = channel.get_partial_message(self.message_id)\n message = (\n safe_format(\n config.end_message,\n winners=\", \".join(winner.mention for winner in winners),\n prize=bold(self.prize),\n )\n if winners\n else f\"Could not pick any winners for the giveaway of {bold(self.prize)}!\"\n )\n embed = self.get_end_embed(guild, config)\n\n view = GiveawayView(\n config.reaction,\n config.participants_reaction,\n config.button_style,\n participant_count=len(set(self.participants)),\n disabled=True,\n )\n\n with contextlib.suppress(discord.HTTPException):\n await gw_message.edit(content=config.gw_end_header, embed=embed, view=view)\n await gw_message.reply(message, view=self.jump_to_giveaway)\n\n async def reroll(self, winner_count: int):\n guild = self.bot.get_guild(self.guild_id)\n if not guild:\n return\n\n config = await self.bot.fetch_config(guild)\n winners = await self.pick_winners(winner_count, guild)\n self.winners = [winner.id for winner in winners]\n\n await self._end()\n\n if config.dm_winner:\n await self.dm_winners(config.dm_message, winners)\n\n channel = guild.get_channel(self.channel_id)\n if not channel or not isinstance(channel, discord.TextChannel):\n return\n\n gw_message = channel.get_partial_message(self.message_id)\n message = (\n safe_format(\n config.reroll_message,\n winners=\", \".join(winner.mention for winner in winners),\n prize=bold(self.prize),\n )\n if winners\n else f\"Could not pick any winners for the giveaway of {bold(self.prize)}!\"\n )\n embed = self.get_end_embed(guild, config)\n\n view = GiveawayView(\n config.reaction,\n config.participants_reaction,\n config.button_style,\n participant_count=len(set(self.participants)),\n disabled=True,\n )\n\n with contextlib.suppress(discord.HTTPException):\n await gw_message.edit(content=config.gw_end_header, embed=embed, view=view)\n await gw_message.reply(message, view=self.jump_to_giveaway)\n\n async def cancel(self):\n await self.bot.pool.execute(\n \"\"\"DELETE FROM giveaways WHERE guild = $1 AND channel = $2 AND message = $3\"\"\",\n self.guild_id,\n self.channel_id,\n self.message_id,\n )\n if self.extra_message_id is not None:\n channel = self.bot.get_channel(self.channel_id)\n if channel is not None:\n await channel.get_partial_message(self.extra_message_id).delete() # type: ignore\n\n async def dm_host(\n self, guild: discord.Guild, winners: List[discord.Member], message: str\n ) -> None:\n host = await self.bot.get_or_fetch_member(guild, self.host_id)\n if not host:\n return\n\n description = safe_format(\n message,\n winners=\", \".join(winner.mention for winner in winners)\n if winners\n else \"No Winners\",\n prize=bold(self.prize),\n )\n\n embed = discord.Embed(\n title=f\"Your giveaway for {self.prize} has ended!\"[:256],\n description=description,\n colour=self.bot.colour,\n )\n view = self.jump_to_giveaway\n\n with contextlib.suppress(discord.HTTPException):\n await host.send(embed=embed, view=view)\n\n async def dm_winners(self, message: str, winners: List[discord.Member]) -> None:\n for winner in winners:\n description = safe_format(\n message, winner=winner.mention, prize=bold(self.prize)\n 
)\n\n embed = discord.Embed(\n title=\"You won!\",\n description=description,\n colour=self.bot.colour,\n )\n view = self.jump_to_giveaway\n\n with contextlib.suppress(discord.HTTPException):\n await winner.send(embed=embed, view=view)\n\n async def pick_winners(\n self, count: int, guild: discord.Guild\n ) -> List[discord.Member]:\n winners = []\n\n participants = self.participants.copy()\n\n while count > 0 and participants:\n member_id = random.choice(participants)\n member = await self.bot.get_or_fetch_member(guild, member_id)\n if member is not None and member not in winners:\n try:\n await self.check_requirements(member)\n except GiveawayError:\n pass\n else:\n winners.append(member)\n count -= 1\n\n participants.remove(member_id)\n\n return winners\n\n def get_end_embed(self, guild: discord.Guild, config: GuildConfig) -> discord.Embed:\n description = (\n f\"This giveaway has ended!\\n\"\n f\"Hosted By: <@!{self.host_id}>\\n\"\n f\"Winners: {', '.join(f'<@!{winner_id}>' for winner_id in self.winners) if self.winners else 'No Winners'}\\n\"\n f\"Ended: {discord.utils.format_dt(datetime.datetime.now(datetime.timezone.utc), style='R')} ({discord.utils.format_dt(datetime.datetime.now(datetime.timezone.utc), style='f')})\\n\"\n )\n if self.donor_id:\n description += f\"Donor: <@!{self.donor_id}>\\n\"\n embed = discord.Embed(\n title=self.prize,\n description=description,\n colour=config.color,\n timestamp=self.ends,\n )\n embed.set_footer(\n text=f\"{self.winner_count} winner(s) • Ended\",\n icon_url=guild.icon or self.bot.user.display_avatar,\n )\n\n requirements = \"\"\n if self.required_roles:\n requirements += f\"Required Roles: {', '.join(f'<@&{role_id}>' for role_id in self.required_roles)}\\n\"\n if self.bypass_roles:\n requirements += f\"Bypass Roles: {', '.join(f'<@&{role_id}>' for role_id in self.bypass_roles)}\\n\"\n if self.blacklisted_roles:\n requirements += f\"Blacklisted Roles: {', '.join(f'<@&{role_id}>' for role_id in self.blacklisted_roles)}\\n\"\n if self.messages_required:\n requirements += f\"Messages Required: **{self.messages_required}** message(s) (5s cooldown)\\n\"\n if self.allowed_message_channels:\n requirements += f\"Allowed Channels: {', '.join(f'<#{cid}>' for cid in self.allowed_message_channels)}\\n\"\n if self.amari:\n requirements += f\"Amari Level: {self.amari}\\n\"\n if self.weekly_amari:\n requirements += f\"Weekly Amari: {self.weekly_amari} XP Points\\n\"\n\n if requirements:\n embed.add_field(name=\"Requirements\", value=requirements, inline=False)\n\n if self.multiplier_roles:\n multiplier_roles = \"\\n\".join(\n [\n f\"- {multiplier_entries}x ・ <@&{multiplier_role}>\"\n for multiplier_role, multiplier_entries in self.multiplier_roles.items()\n ]\n )\n embed.add_field(name=\"Bonus Entries\", value=multiplier_roles, inline=False)\n\n return embed" }, { "identifier": "Raffle", "path": "models/raffles.py", "snippet": "class Raffle:\n \"\"\"\n Represents a raffle object.\n\n Attributes\n ----------\n pool: asyncpg.Pool\n The PostgreSQL connection pool instance.\n guild: discord.Guild\n The guild (server) where the raffle is hosted.\n name: str\n The name of the raffle.\n winner: Optional[discord.Member]\n The member instance of the winner, or None if the raffle hasn't ended yet.\n deputy_roles: List[discord.Role]\n A list of roles associated with the raffle.\n deputy_members: List[discord.Member]\n A list of members associated with the raffle.\n tickets: Dict[discord.Member, int]\n A mapping of members to the number of tickets they have.\n \"\"\"\n\n 
def __init__(\n self,\n pool: asyncpg.Pool,\n *,\n guild: discord.Guild,\n name: str,\n winner: Optional[discord.Member],\n deputy_roles: List[discord.Role],\n deputy_members: List[discord.Member],\n tickets: Dict[discord.Member, int],\n ):\n self.pool = pool\n\n self.guild = guild\n self.name = name\n self.winner = winner\n self.deputy_roles = deputy_roles\n self.deputy_members = deputy_members\n self.tickets = tickets\n\n def __str__(self):\n return self.name\n\n def __repr__(self) -> str:\n return f\"<Raffle name={self.name} guild={self.guild} winner={self.winner}>\"\n\n def __hash__(self) -> int:\n return hash((self.name, self.guild))\n\n def __eq__(self, other: Raffle) -> bool:\n return self.name == other.name and self.guild == other.guild\n\n @classmethod\n async def from_record(cls, bot: Giftify, *, record: asyncpg.Record) -> Raffle:\n name = record[\"name\"]\n guild = bot.get_guild(record[\"guild\"])\n if guild is None:\n raise RaffleError(\"The guild having the raffle was not found.\")\n\n winner_id = record[\"winner\"]\n winner: Optional[discord.Member] = (\n (await bot.get_or_fetch_member(guild, winner_id) or FakeMember(winner_id))\n if winner_id\n else None\n ) # type: ignore\n\n deputy_roles = [guild.get_role(role_id) for role_id in record[\"deputy_roles\"]]\n deputy_members = [\n await bot.get_or_fetch_member(guild, member_id)\n for member_id in record[\"deputy_members\"]\n ]\n\n tickets = {\n await bot.get_or_fetch_member(guild, int(member_id)): num_tickets\n for member_id, num_tickets in record[\"tickets\"].items()\n }\n\n return cls(\n bot.pool,\n guild=guild,\n name=name,\n winner=winner,\n deputy_roles=filter_none(deputy_roles),\n deputy_members=filter_none(deputy_members),\n tickets=filter_none(tickets),\n )\n\n async def roll(self) -> discord.Member:\n \"\"\"\n End the raffle and set the winner.\n \"\"\"\n members = list(self.tickets.keys())\n weights = list(self.tickets.values())\n\n self.winner = random.choices(members, weights, k=1)[0]\n\n await self.save()\n\n return self.winner\n\n async def add_deputy(self, obj: Union[discord.Member, discord.Role]) -> None:\n \"\"\"\n Add a deputy to the raffle.\n\n Parameters\n ----------\n obj: Union[discord.Member, discord.Role]\n The instance of deputy member or role to be added.\n \"\"\"\n if isinstance(obj, discord.Member):\n if len(self.deputy_members) >= 25:\n raise RaffleError(\"You cannot add more than 25 deputy members.\")\n self.deputy_members.append(obj)\n elif isinstance(obj, discord.Role):\n if len(self.deputy_roles) >= 10:\n raise RaffleError(\"You cannot add more than 10 deputy roles.\")\n self.deputy_roles.append(obj)\n else:\n raise RaffleError(\"Invalid obj type.\")\n\n await self.save()\n\n async def remove_deputy(self, obj: Union[discord.Member, discord.Role]) -> None:\n \"\"\"\n Remove a deputy from the raffle.\n\n Parameters\n ----------\n obj: Union[discord.Member, discord.Role]\n The instance of deputy member or role to be removed.\n \"\"\"\n if isinstance(obj, discord.Member):\n if obj not in self.deputy_members:\n raise RaffleError(\"That member is not a deputy.\")\n self.deputy_members.remove(obj)\n elif isinstance(obj, discord.Role):\n if obj not in self.deputy_roles:\n raise RaffleError(\"That role is not a deputy.\")\n self.deputy_roles.remove(obj)\n else:\n raise RaffleError(\"Invalid obj type.\")\n\n await self.save()\n\n async def add_tickets(self, member: discord.Member, num_tickets: int) -> None:\n \"\"\"\n Add tickets to a member.\n\n Parameters\n ----------\n member: discord.Member\n The 
instance of the member.\n num_tickets: int\n The number of tickets to add.\n \"\"\"\n if member in self.tickets:\n self.tickets[member] += num_tickets\n else:\n self.tickets[member] = num_tickets\n\n await self.save()\n\n async def remove_tickets(self, member: discord.Member, num_tickets: int) -> None:\n \"\"\"\n Remove tickets from a member.\n\n Parameters\n ----------\n member: discord.Member\n The instance of the member.\n num_tickets: int\n The number of tickets to remove.\n \"\"\"\n if member in self.tickets:\n self.tickets[member] -= num_tickets\n if self.tickets[member] <= 0:\n del self.tickets[member]\n\n await self.save()\n else:\n raise RaffleError(\n f\"That member does not have any tickets in {self.name} raffle.\"\n )\n\n async def save(self) -> None:\n \"\"\"\n Update raffle attributes in the database.\n \"\"\"\n query = \"\"\"\n INSERT INTO raffles (guild, name, winner, deputy_roles, deputy_members, tickets)\n VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (guild, name)\n DO UPDATE SET winner = EXCLUDED.winner, deputy_roles = EXCLUDED.deputy_roles,\n deputy_members = EXCLUDED.deputy_members, tickets = EXCLUDED.tickets;\n \"\"\"\n await self.pool.execute(\n query,\n self.guild.id,\n self.name,\n self.winner.id if self.winner else None,\n [role.id for role in self.deputy_roles],\n [member.id for member in self.deputy_members],\n {\n str(member.id): num_tickets\n for member, num_tickets in self.tickets.items()\n },\n )\n\n async def delete(self):\n \"\"\"\n Delete the raffle from the database.\n \"\"\"\n query = \"\"\"DELETE FROM raffles WHERE guild = $1 AND name = $2\"\"\"\n await self.pool.execute(query, self.guild.id, self.name)" }, { "identifier": "ERROR_EMOJI", "path": "utils/constants.py", "snippet": "ERROR_EMOJI = \"<:GiftifyError:1117842868057423914>\"" }, { "identifier": "SUCCESS_EMOJI", "path": "utils/constants.py", "snippet": "SUCCESS_EMOJI = \"<:GiftifySuccess:1100674526318166048>\"" }, { "identifier": "WARN_EMOJI", "path": "utils/constants.py", "snippet": "WARN_EMOJI = \"<:GiftifyWarn:1098498926564356106>\"" }, { "identifier": "db_init", "path": "utils/db.py", "snippet": "async def db_init(connection: asyncpg.Connection) -> None:\n await connection.set_type_codec(\n \"jsonb\", schema=\"pg_catalog\", encoder=_encode_jsonb, decoder=_decode_jsonb\n )" }, { "identifier": "CommandTree", "path": "utils/tree.py", "snippet": "class CommandTree(app_commands.CommandTree):\r\n client: \"Giftify\"\r\n\r\n async def on_error(\r\n self,\r\n interaction: Interaction,\r\n error: app_commands.AppCommandError,\r\n ) -> None:\r\n view = discord.ui.View()\r\n\r\n button = discord.ui.Button(label=\"Support\", url=\"https://discord.gg/GQSGChbEKz\")\r\n\r\n view.add_item(button)\r\n\r\n if not interaction.response.is_done():\r\n await interaction.response.defer(thinking=True, ephemeral=True)\r\n\r\n embed = discord.Embed(\r\n title=\"An error was raised while executing this command!\",\r\n color=discord.Colour.red(),\r\n )\r\n\r\n if isinstance(error, app_commands.CommandInvokeError):\r\n if isinstance(error, MaxChannelConfigCreationError):\r\n embed.description = (\r\n f\"{WARN_EMOJI} You cannot setup configuration for more than 25 channels, please try removing some.\"\r\n )\r\n elif isinstance(error, discord.HTTPException):\r\n embed.description = f\"{WARN_EMOJI} Unknown HTTP error occured!\"\r\n else:\r\n embed.description = (\r\n f\"{WARN_EMOJI} An unknown error occurred , my developers have been notified about this error.\"\r\n )\r\n self.client.log_handler.log.exception(\"Exception 
occurred in the CommandTree:\\n\", exc_info=error)\r\n sentry_sdk.capture_exception(error)\r\n elif isinstance(error, app_commands.TransformerError):\r\n if isinstance(error, TransformerError):\r\n embed.description = f\"{WARN_EMOJI} {error.message}\"\r\n else:\r\n embed.description = f\"{WARN_EMOJI} {str(error)}\"\r\n\r\n elif isinstance(error, app_commands.MissingPermissions):\r\n missing = [perm.replace(\"_\", \" \").replace(\"guild\", \"server\").title() for perm in error.missing_permissions]\r\n\r\n format = \"\\n> \".join(missing)\r\n\r\n embed.description = (\r\n f\"{WARN_EMOJI} You are missing follwing permission(s) to run this command: \\n\\n> {format}\"\r\n )\r\n\r\n elif isinstance(error, app_commands.BotMissingPermissions):\r\n missing = [perm.replace(\"_\", \" \").replace(\"guild\", \"server\").title() for perm in error.missing_permissions]\r\n\r\n format = \"\\n> \".join(missing)\r\n\r\n embed.description = f\"{WARN_EMOJI} I am missing follwing permission(s) to run this command: \\n\\n > {format}\"\r\n\r\n elif isinstance(error, app_commands.CommandOnCooldown):\r\n cooldown = int(error.cooldown.per)\r\n retry_after = int(error.retry_after)\r\n embed.description = f\"{WARN_EMOJI} The cooldown for this command is **{cooldown}s**. Try running the command again after **{retry_after}s**.\"\r\n\r\n elif isinstance(error, app_commands.CommandNotFound):\r\n embed.description = f'{WARN_EMOJI} The command \"{error.name}\" was not found.'\r\n elif isinstance(error, DonationError):\r\n embed.description = f\"{WARN_EMOJI} {str(error)}\"\r\n elif isinstance(error, app_commands.CheckFailure):\r\n if isinstance(error, (DonationCategoryError, DonationPermissionsError)):\r\n embed.description = f\"{WARN_EMOJI} {str(error.message)}\"\r\n else:\r\n return\r\n else:\r\n embed.description = (\r\n f\"{WARN_EMOJI} An unknown error occured, my developers have been notified about this errors.\"\r\n )\r\n await interaction.followup.send(embed=embed, ephemeral=True)\r\n sentry_sdk.capture_exception(error)\r\n return self.client.log_handler.log.exception(\"Exception occurred in the CommandTree:\\n\", exc_info=error)\r\n\r\n return await interaction.followup.send(embed=embed, ephemeral=True)\r" }, { "identifier": "ConfirmationView", "path": "utils/view.py", "snippet": "class ConfirmationView(BaseView):\r\n def __init__(\r\n self,\r\n *,\r\n timeout: float,\r\n interaction: Interaction,\r\n success_message: str,\r\n cancel_message: str,\r\n ) -> None:\r\n super().__init__(timeout=timeout)\r\n self.interaction = interaction\r\n self.success_message = success_message\r\n self.cancel_message = cancel_message\r\n self.value: Optional[bool] = None\r\n\r\n @property\r\n def success_embed(self) -> discord.Embed:\r\n return discord.Embed(\r\n description=f\"{SUCCESS_EMOJI} {self.success_message}\",\r\n colour=discord.Colour.green(),\r\n )\r\n\r\n @property\r\n def cancel_embed(self) -> discord.Embed:\r\n return discord.Embed(\r\n description=f\"{SUCCESS_EMOJI} {self.cancel_message}\",\r\n colour=discord.Colour.green(),\r\n )\r\n\r\n async def interaction_check(self, interaction: Interaction) -> bool:\r\n if interaction.user and interaction.user.id == self.interaction.user.id:\r\n return True\r\n else:\r\n await interaction.response.send_message(\r\n \"This confirmation dialog is not for you.\", ephemeral=True\r\n )\r\n return False\r\n\r\n async def on_timeout(self) -> None:\r\n with contextlib.suppress(discord.HTTPException):\r\n for item in self.children:\r\n item.disabled = True\r\n await 
self.interaction.edit_original_response(view=self)\r\n\r\n @discord.ui.button(label=\"Confirm\", style=discord.ButtonStyle.green)\r\n async def confirm(self, interaction: Interaction, button: discord.ui.Button):\r\n self.value = True\r\n await interaction.response.edit_message(embed=self.success_embed, view=None)\r\n self.stop()\r\n\r\n @discord.ui.button(label=\"Cancel\", style=discord.ButtonStyle.red)\r\n async def cancel(self, interaction: Interaction, button: discord.ui.Button):\r\n self.value = False\r\n await interaction.response.edit_message(embed=self.cancel_embed, view=None)\r\n\r\n self.stop()\r" } ]
import asyncio import datetime import logging import os import pathlib import sys import traceback import aiohttp import asyncpg import discord import dotenv import jishaku import sentry_sdk import uvloop from logging.handlers import RotatingFileHandler from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from amari import AmariClient from discord.ext import commands from discord.utils import MISSING from discord.utils import _ColourFormatter as ColourFormatter from expiringdict import ExpiringDict from sentry_sdk.integrations.logging import LoggingIntegration from models.giveaway_settings import GuildConfig from models.giveaways import Giveaway from models.raffles import Raffle from utils.constants import ERROR_EMOJI, SUCCESS_EMOJI, WARN_EMOJI from utils.db import db_init from utils.tree import CommandTree from utils.view import ConfirmationView from cogs.timer_manager import TimerManager from models.donation_settings import GuildDonationConfig
13510
from __future__ import annotations if TYPE_CHECKING: dotenv.load_dotenv() try: except ImportError: # Windows pass else: uvloop.install() jishaku.Flags.HIDE = True jishaku.Flags.RETAIN = True jishaku.Flags.NO_UNDERSCORE = True jishaku.Flags.NO_DM_TRACEBACK = True OWNER_IDS = (747403406154399765,) EXTENSIONS: Tuple[str, ...] = ( "meta", "settings", "timers", "giveaways", "donations", "raffles", "logger", "webserver", ) class RemoveNoise(logging.Filter): def __init__(self) -> None: super().__init__(name="discord.state") def filter(self, record) -> bool: if record.levelname == "WARNING" and "referencing an unknown" in record.msg: return False return True class LogHandler: def __init__(self, stream: bool = True) -> None: self.log: logging.Logger = logging.getLogger() self.max_bytes: int = 32 * 1024 * 1024 self.logging_path = pathlib.Path("./logs/") self.logging_path.mkdir(exist_ok=True) self.stream = stream async def __aenter__(self) -> "LogHandler": return self.__enter__() def __enter__(self: "LogHandler") -> "LogHandler": logging.getLogger("discord").setLevel(logging.INFO) logging.getLogger("discord.http").setLevel(logging.INFO) logging.getLogger("discord.state").addFilter(RemoveNoise()) self.log.setLevel(logging.INFO) handler = RotatingFileHandler( filename=self.logging_path / "Giftify.log", encoding="utf-8", mode="w", maxBytes=self.max_bytes, backupCount=5, ) dt_fmt = "%Y-%m-%d %H:%M:%S" fmt = logging.Formatter("[{asctime}] [{levelname:<7}] {name}: {message}", dt_fmt, style="{") handler.setFormatter(fmt) self.log.addHandler(handler) if self.stream: stream_handler = logging.StreamHandler() stream_handler.setFormatter(ColourFormatter()) self.log.addHandler(stream_handler) return self async def __aexit__(self, *args: Any) -> None: return self.__exit__(*args) def __exit__(self, *args: Any) -> None: handlers = self.log.handlers[:] for handler in handlers: handler.close() self.log.removeHandler(handler) class GiftifyHelper: configs: List[GuildConfig] = [] donation_configs: List[GuildDonationConfig] = [] cached_giveaways: List["Giveaway"] = [] webhook_cache: Dict[discord.TextChannel, discord.Webhook] = {}
from __future__ import annotations if TYPE_CHECKING: dotenv.load_dotenv() try: except ImportError: # Windows pass else: uvloop.install() jishaku.Flags.HIDE = True jishaku.Flags.RETAIN = True jishaku.Flags.NO_UNDERSCORE = True jishaku.Flags.NO_DM_TRACEBACK = True OWNER_IDS = (747403406154399765,) EXTENSIONS: Tuple[str, ...] = ( "meta", "settings", "timers", "giveaways", "donations", "raffles", "logger", "webserver", ) class RemoveNoise(logging.Filter): def __init__(self) -> None: super().__init__(name="discord.state") def filter(self, record) -> bool: if record.levelname == "WARNING" and "referencing an unknown" in record.msg: return False return True class LogHandler: def __init__(self, stream: bool = True) -> None: self.log: logging.Logger = logging.getLogger() self.max_bytes: int = 32 * 1024 * 1024 self.logging_path = pathlib.Path("./logs/") self.logging_path.mkdir(exist_ok=True) self.stream = stream async def __aenter__(self) -> "LogHandler": return self.__enter__() def __enter__(self: "LogHandler") -> "LogHandler": logging.getLogger("discord").setLevel(logging.INFO) logging.getLogger("discord.http").setLevel(logging.INFO) logging.getLogger("discord.state").addFilter(RemoveNoise()) self.log.setLevel(logging.INFO) handler = RotatingFileHandler( filename=self.logging_path / "Giftify.log", encoding="utf-8", mode="w", maxBytes=self.max_bytes, backupCount=5, ) dt_fmt = "%Y-%m-%d %H:%M:%S" fmt = logging.Formatter("[{asctime}] [{levelname:<7}] {name}: {message}", dt_fmt, style="{") handler.setFormatter(fmt) self.log.addHandler(handler) if self.stream: stream_handler = logging.StreamHandler() stream_handler.setFormatter(ColourFormatter()) self.log.addHandler(stream_handler) return self async def __aexit__(self, *args: Any) -> None: return self.__exit__(*args) def __exit__(self, *args: Any) -> None: handlers = self.log.handlers[:] for handler in handlers: handler.close() self.log.removeHandler(handler) class GiftifyHelper: configs: List[GuildConfig] = [] donation_configs: List[GuildDonationConfig] = [] cached_giveaways: List["Giveaway"] = [] webhook_cache: Dict[discord.TextChannel, discord.Webhook] = {}
raffles_cache: Dict[discord.Guild, List[Raffle]] = ExpiringDict(max_len=100, max_age_seconds=300)
2
2023-11-09 15:00:15+00:00
16k
Zjy0401/CoCoFormer
train.py
[ { "identifier": "create_jsf_datasets", "path": "dataset/jsf.py", "snippet": "def create_jsf_datasets(dataset_root, max_seq, random_seq=True):\n\n train_root = os.path.join(dataset_root, \"train\")\n # val_root = os.path.join(dataset_root, \"val\")\n test_root = os.path.join(dataset_root, \"test\")\n\n train_dataset = MultiJSFDataset(train_root, max_seq, random_seq)\n # val_dataset = JSFDataset(val_root, max_seq, random_seq)\n test_dataset = MultiJSFDataset(test_root, max_seq, random_seq)\n\n return train_dataset, test_dataset" }, { "identifier": "CoCoformer", "path": "model/CoCoFormer.py", "snippet": "class CoCoformer(nn.Module):\n\n def __init__(self, word2event, event2word, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence=2048, c_max_seq=256, b_max_seq=1024, rpr=False):\n super(CoCoformer, self).__init__()\n\n self.dummy = DummyDecoder()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq = max_sequence\n self.c_max_seq = c_max_seq\n self.b_max_seq = b_max_seq\n self.rpr = rpr\n # word2event and event2word:\n self.word2event = word2event\n self.event2word = event2word\n\n # past layer of chord\n self.cpast_layer_dmodel = d_model\n self.cpast_layer_nhead = 8\n self.cpast_dim_forward = 256\n self.cpast_layer_max_seq = 256\n self.cpast_layer_nlayers = 1\n\n # past layer of beats\n self.bpast_layer_dmodel = d_model\n self.bpast_layer_nhead = 8\n self.bpast_dim_forward = 256\n self.bpast_layer_max_seq = 1024\n self.bpast_layer_nlayers = 1\n\n # Input embedding\n self.n_embedding = nn.Embedding(VOCAB_SIZE, self.d_model)\n self.c_embedding = nn.Embedding(VOCAB_SIZE, self.cpast_layer_dmodel)\n self.b_embedding = nn.Embedding(VOCAB_SIZE, self.bpast_layer_dmodel)\n # Positional encoding\n self.n_positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq)\n self.c_positional_encoding = PositionalEncoding(self.cpast_layer_dmodel, self.dropout, self.cpast_layer_max_seq)\n self.b_positional_encoding = PositionalEncoding(self.bpast_layer_dmodel, self.dropout, self.bpast_layer_max_seq)\n\n # Base transformer\n if not self.rpr:\n # To make a decoder-only transformer we need to use masked encoder layers\n # Dummy decoder to essentially just return the encoder output\n encoder_norm = LayerNorm(self.d_model)\n encoder_past_layer = TransformerEncoderPastLayer(self.cpast_layer_dmodel, self.cpast_layer_nhead,\n self.cpast_dim_forward, self.bpast_layer_dmodel,\n self.bpast_layer_nhead, self.bpast_dim_forward,\n self.d_model, self.nhead,\n self.d_ff, self.dropout)\n encoder_layer = TransformerEncoderLayer(self.d_model, self.nhead, self.d_ff, self.dropout)\n encoder = TransformerEncoder(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq, self.c_max_seq,\n self.b_max_seq, encoder_norm)\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_encoder=encoder, custom_decoder=self.dummy\n )\n # RPR Transformer\n elif self.rpr:\n encoder_norm = LayerNorm(self.d_model)\n encoder_layer = TransformerEncoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout,\n er_len=self.max_seq)\n encoder_past_layer = TransformerEncoderLayerRPR_(self.cpast_layer_dmodel, self.cpast_layer_nhead,\n self.cpast_dim_forward, self.bpast_layer_dmodel,\n self.bpast_layer_nhead, self.bpast_dim_forward,\n 
self.d_model, self.nhead,\n self.d_ff, self.dropout, er_len=self.max_seq)\n encoder = TransformerEncoderRPR(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq,\n self.c_max_seq, self.b_max_seq, encoder_norm)\n\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy, custom_encoder=encoder\n )\n\n # Final output is a softmaxed linear layer\n # TODO: verify the size of linear\n self.Norm1 = nn.LayerNorm(1024)\n self.ReLU = nn.ReLU()\n self.Norm2 = nn.LayerNorm(181)\n self.Dropout = nn.Dropout(dropout)\n self.transLinear = nn.Linear(256, 256)\n self.Wout1 = nn.Linear(self.d_model, 1024)\n self.Wout2 = nn.Linear(1024, 1024)\n self.Wout3 = nn.Linear(1024, VOCAB_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n\n def _reset_parameters(self):\n r\"\"\"Initiate parameters in the transformer model.\"\"\"\n\n for p in self.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n # forward\n def forward(self, x1, x2, x3, mask=True):\n\n args = parse_train_args()\n # for pure-Transformer:\n # Transformer module:\n if mask is True:\n if args.gpu[0] != -1:\n mask = self.transformer.generate_square_subsequent_mask(x1.shape[1]).cuda(device=args.gpu[0])\n else:\n mask = self.transformer.generate_square_subsequent_mask(x1.shape[1]).cpu()\n else:\n mask = None\n # Input shape is (max_seq, batch_size, d_model)\n x_n = self.n_embedding(x1)\n x_n = x_n.permute(1, 0, 2)\n x_n = self.n_positional_encoding(x_n)\n\n x_c = self.c_embedding(x2)\n x_c = x_c.permute(1, 0, 2)\n x_c = self.c_positional_encoding(x_c)\n\n x_b = self.b_embedding(x3)\n x_b = x_b.permute(1, 0, 2)\n x_b = self.b_positional_encoding(x_b)\n\n # Since there are no true decoder layers, the tgt is unused\n # Pytorch wants src and tgt to have some equal dims however\n x_out = self.transformer(src=torch.cat((x_n, x_c, x_b), dim=0), tgt=x_n,\n src_mask=mask)\n # x_out = self.transformer(src=x_transformer, tgt=x_transformer, src_mask=mask)\n # Back to (batch_size, max_seq, d_model)\n x_out = x_out.permute(1, 0, 2)\n\n # concat\n # x_concat = torch.cat([x_out, x_out2], dim=1)\n y = self.Dropout(self.Norm1(self.ReLU(self.Wout1(x_out))))\n y = self.Dropout(self.Norm1(self.ReLU(self.Wout2(y))))\n y = self.Wout3(y)\n # y = self.Wout2(y)\n # y = self.softmax(y)\n\n del mask\n\n # They are trained to predict the next note in sequence (we don't need the last one)\n return y\n\n # unconditional generate\n def generate(self, primer=None, target_seq_length=1024, beam=0, beam_chance=1.0):\n\n assert (not self.training), \"Cannot generate while in training mode\"\n\n print(\"Generating sequence of max length:\", target_seq_length)\n\n gen_seq = torch.full((1, target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n\n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n\n # print(\"primer:\",primer)\n # print(gen_seq)\n cur_i = num_primer\n while cur_i < target_seq_length:\n # gen_seq_batch = gen_seq.clone()\n y = self.softmax(self.forward(gen_seq[..., :cur_i]))[..., :len(self.word2event)]\n token_probs = y[:, cur_i - 1, :]\n\n if beam == 0:\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0, 1)\n\n if beam_ran <= beam_chance:\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n\n beam_rows = top_i // VOCAB_SIZE\n beam_cols = top_i % VOCAB_SIZE\n\n gen_seq = 
gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n\n else:\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n # print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n\n # Let the transformer decide to end if it wants to\n # if next_token == TOKEN_END:\n # print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n # break\n\n cur_i += 1\n if cur_i % 50 == 0:\n print(cur_i, \"/\", target_seq_length)\n\n return gen_seq[:, :cur_i]\n\n # conditional generate\n def conditional_generate(self, beats, chord, seq, c, bs, ba, bt, bb, target_seq_length=1024, beam=0, beam_chance=1.0):\n\n assert (not self.training), \"Cannot generate while in training mode\"\n print(\"Generating sequence of max length:\", target_seq_length)\n chord = torch.tensor(chord, device=get_device()).unsqueeze(0)\n beats = torch.tensor(beats, device=get_device()).unsqueeze(0)\n\n gen_seq = torch.full((1, target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n primer = torch.tensor([c[0], bs[0], seq[0], ba[0]])\n primer_num = 1 # decide key to add\n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n\n # print(\"primer:\",primer)\n # print(gen_seq)\n cur_i = num_primer\n # first input: C B N B\n cur_i_n = 1\n cur_i_b = 2\n cur_i_c = 1\n check_error = 0\n pbar = tqdm(total=len(seq)*9)\n while cur_i < target_seq_length:\n a = gen_seq[..., :cur_i].cpu().numpy()\n # gen_seq_batch = gen_seq.clone()\n # print(\"input:\", gen_seq[..., :cur_i], chord[..., :cur_i_c], beats[..., :cur_i_b])\n y = self.softmax(self.forward(gen_seq[..., :cur_i], chord[..., :cur_i_c],\n beats[..., :cur_i_b]))[..., :len(self.word2event)]\n token_probs = y[:, cur_i - 1, :]\n # check for y\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n if check_error > 256:\n print(\"error! 
regenerate!\")\n return False\n # next token is the next token\n if cur_i % 9 == 1: # token is chord, next token must be beats\n if not 178 < next_token < 191: # if it is not beat\n check_error += 1\n continue\n if cur_i % 9 in [2, 4, 6, 8]: # this token must be beat, next token must be note\n if not next_token < 129: # if it is not note\n check_error += 1\n continue\n else: # this token must be note, next token must be chord or beat\n if not 128 < next_token < 191: # if it is chord or beat\n check_error += 1\n continue\n\n if beam == 0:\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0, 1)\n\n if beam_ran <= beam_chance:\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n\n beam_rows = top_i // VOCAB_SIZE\n beam_cols = top_i % VOCAB_SIZE\n\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n\n else:\n # print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n cur_i += 1\n pbar.update(1)\n cur_i_n += 1\n if cur_i % 9 == 0 and primer_num < len(seq):\n # add C B_S N_S B_A\n gen_seq[:, cur_i] = chord.squeeze()[primer_num]\n gen_seq[:, cur_i+1] = torch.tensor(bs[primer_num], device=get_device())\n gen_seq[:, cur_i+2] = torch.tensor(seq[primer_num], device=get_device())\n gen_seq[:, cur_i+3] = torch.tensor(ba[primer_num], device=get_device())\n primer_num += 1\n cur_i += 4\n pbar.update(4)\n cur_i_n += 1\n cur_i_b += 2\n cur_i_c += 1\n # a = gen_seq[..., :cur_i].cpu().numpy()\n if cur_i % 9 != 0 and cur_i % 9 != 4 and primer_num < len(seq) + 1:\n # add B\n gen_seq[:, cur_i] = beats.squeeze()[cur_i_b]\n cur_i_b += 1\n cur_i_n += 1\n cur_i += 1\n pbar.update(1)\n # a = gen_seq[..., :cur_i].cpu().numpy()\n if primer_num == len(seq) and cur_i == len(seq) * 9:\n print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n break\n # print(cur_i, \"/\", target_seq_length)\n\n print(\"all errors:%d\" % check_error)\n return gen_seq[:, :cur_i]" }, { "identifier": "Discriminator", "path": "model/CoCoFormer.py", "snippet": "class Discriminator(nn.Module):\n \"\"\"\n to judge the true sample or fake\n return fake or true\n \"\"\"\n def __init__(self, input_emb=1, d_model=256, nhead=4, d_ff=512, dropout=0.5, out_emb=1024):\n super(Discriminator, self).__init__()\n self.linear1 = nn.Linear(input_emb, d_model)\n self.transformer = TransformerEncoderLayer(d_model, nhead, d_ff, dropout)\n self.linear2 = nn.Linear(d_model, out_emb)\n self.relu = nn.LeakyReLU(negative_slope=0.01, inplace=False)\n self.maxpool = nn.AdaptiveMaxPool1d(1)\n self.Norm1 = nn.LayerNorm(d_model)\n self.Norm2 = nn.LayerNorm(out_emb)\n self.dropout = nn.Dropout(dropout)\n self.sigmoid = nn.Sigmoid()\n self.loss = nn.BCELoss()\n\n def forward(self, x, labels):\n x = x.float().unsqueeze(2)\n x = self.dropout(self.Norm1(self.linear1(x)))\n x = self.transformer(x)\n logits = self.dropout(self.Norm2(self.linear2(x)))\n logits = self.sigmoid(self.relu(self.maxpool(logits)))\n logits = logits.reshape(logits.shape[0] * logits.shape[1], -1)\n labels = labels.reshape(logits.shape[0] * logits.shape[1], -1)\n loss = self.loss(logits, labels)\n\n # import numpy as np\n # logits = logits.cpu().detach().numpy()\n # labels = labels.cpu().detach().numpy()\n # loss = []\n # for i in logits:\n # loss.append(np.log(1-1/(1+np.exp(i[0]))))\n output = (loss, logits)\n\n return output\n\n def _reset_parameters(self):\n\n for p in self.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)" }, { "identifier": "PureTransformer", "path": "model/CoCoFormer.py", "snippet": "class 
PureTransformer(nn.Module):\n\n def __init__(self, word2event, event2word, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence=2048, c_max_seq=256, b_max_seq=1024, rpr=False):\n super(PureTransformer, self).__init__()\n self.dummy = DummyDecoder()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq = max_sequence\n self.rpr = rpr\n # word2event and event2word:\n self.word2event = word2event\n self.event2word = event2word\n # Input embedding\n self.embedding = nn.Embedding(VOCAB_SIZE, self.d_model)\n\n # Positional encoding\n self.positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq)\n\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy\n )\n\n # Final output is a softmaxed linear layer\n self.Wout = nn.Linear(self.d_model, VOCAB_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n\n # forward\n def forward(self, x, mask=True):\n\n if mask is True:\n mask = self.transformer.generate_square_subsequent_mask(x[0].shape[1]).to(get_device())\n else:\n mask = None\n\n x = self.embedding(x)\n\n # Input shape is (max_seq, batch_size, d_model)\n x = x.permute(1, 0, 2)\n\n x = self.positional_encoding(x)\n\n # Since there are no true decoder layers, the tgt is unused\n # Pytorch wants src and tgt to have some equal dims however\n x_out = self.transformer(src=x, tgt=x, src_mask=mask)\n\n # Back to (batch_size, max_seq, d_model)\n x_out = x_out.permute(1, 0, 2)\n\n y = self.Wout(x_out)\n # y = self.softmax(y)\n\n del mask\n\n # They are trained to predict the next note in sequence (we don't need the last one)\n return y" }, { "identifier": "SmoothCrossEntropyLoss", "path": "model/loss.py", "snippet": "class SmoothCrossEntropyLoss(_Loss):\n \"\"\"\n https://arxiv.org/abs/1512.00567\n \"\"\"\n __constants__ = ['label_smoothing', 'vocab_size', 'ignore_index', 'reduction']\n\n def __init__(self, label_smoothing, vocab_size, ignore_index=-100, reduction='mean', is_logits=True):\n assert 0.0 <= label_smoothing <= 1.0\n super().__init__(reduction=reduction)\n\n self.label_smoothing = label_smoothing\n self.vocab_size = vocab_size\n self.ignore_index = ignore_index\n self.input_is_logits = is_logits\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: [B * T, V]\n target: [B * T]\n Returns:\n cross entropy: [1]\n \"\"\"\n mask = (target == self.ignore_index).unsqueeze(-1)\n q = F.one_hot(target.long(), self.vocab_size).type(torch.float32)\n u = 1.0 / self.vocab_size\n q_prime = (1.0 - self.label_smoothing) * q + self.label_smoothing * u\n q_prime = q_prime.masked_fill(mask, 0)\n\n ce = self.cross_entropy_with_logits(q_prime, input)\n if self.reduction == 'mean':\n lengths = torch.sum(target != self.ignore_index)\n return ce.sum() / lengths\n elif self.reduction == 'sum':\n return ce.sum()\n else:\n raise NotImplementedError\n\n def cross_entropy_with_logits(self, p, q):\n return -torch.sum(p * (q - q.logsumexp(dim=-1, keepdim=True)), dim=-1)" }, { "identifier": "get_device", "path": "utilities/device.py", "snippet": "def get_device():\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE" }, { "identifier": "use_cuda", "path": "utilities/device.py", "snippet": "def use_cuda(cuda_bool):\n\n global 
USE_CUDA\n USE_CUDA = cuda_bool" }, { "identifier": "LrStepTracker", "path": "utilities/lr_scheduling.py", "snippet": "class LrStepTracker:\n\n def __init__(self, model_dim=512, warmup_steps=4000, init_steps=0):\n # Store Values\n self.warmup_steps = warmup_steps\n self.model_dim = model_dim\n self.init_steps = init_steps\n\n # Begin Calculations\n self.invsqrt_dim = (1 / math.sqrt(model_dim))\n self.invsqrt_warmup = (1 / (warmup_steps * math.sqrt(warmup_steps)))\n\n # step\n def step(self, step):\n\n step += self.init_steps\n if(step <= self.warmup_steps):\n return self.invsqrt_dim * self.invsqrt_warmup * step\n else:\n invsqrt_step = (1 / math.sqrt(step))\n return self.invsqrt_dim * invsqrt_step" }, { "identifier": "get_lr", "path": "utilities/lr_scheduling.py", "snippet": "def get_lr(optimizer):\n\n for param_group in optimizer.param_groups:\n return param_group['lr']" }, { "identifier": "parse_train_args", "path": "utilities/argument_funcs.py", "snippet": "def parse_train_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-input_dir\", type=str, default=\"./dataset/dataset/JSF_SATB\", help=\"Folder of preprocessed and pickled midi files\")\n parser.add_argument(\"-output_dir\", type=str, default=\"./baseline_3loss\", help=\"Folder to save model weights. Saves one every epoch\")\n parser.add_argument(\"-weight_modulus\", type=int, default=1, help=\"How often to save epoch weights (ex: value of 10 means save every 10 epochs)\")\n parser.add_argument(\"-print_modulus\", type=int, default=1, help=\"How often to print train results for a batch (batch loss, learn rate, etc.)\")\n parser.add_argument(\"-word2event\", type=str, default='./dataset/word2event.pkl', help='word table location: *.pkl')\n parser.add_argument(\"-n_workers\", type=int, default=2, help=\"Number of threads for the dataloader\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"--gpu\", default=[2], nargs='+', type=int, help=\"For Multi-GPUs training\")\n parser.add_argument(\"--no_tensorboard\", action=\"store_true\", help=\"Turns off tensorboard result reporting\")\n parser.add_argument('--scheduled_sampling', default=False, help='False means use teacher forcing, True means use scheduled_sampling')\n parser.add_argument(\"--scheduled_sampling_change_ratio\", default=0.5, type=int, help='ratio about mix golden target with output')\n parser.add_argument(\"-continue_weights\", type=str, default=None, help=\"Model weights to continue training based on\")\n parser.add_argument(\"-continue_epoch\", type=int, default=None, help=\"Epoch the continue_weights model was at\")\n\n parser.add_argument(\"-lr\", type=float, default=None, help=\"Constant learn rate. 
Leave as None for a custom scheduler.\")\n parser.add_argument(\"-ce_smoothing\", type=float, default=None, help=\"Smoothing parameter for smoothed cross entropy loss (defaults to no smoothing)\")\n parser.add_argument(\"-batch_size\", type=int, default=2, help=\"Batch size per gpu to use\")\n parser.add_argument(\"-epochs\", type=int, default=300, help=\"Number of epochs to use\")\n\n parser.add_argument(\"-adv_train\", default=True, help='add discriminator loss')\n parser.add_argument(\"-only_Transformer\", default=False, help='use pure Transformer, default set to false, True only for test')\n parser.add_argument(\"-loss\", default=[0.4, 0.2, 0.8], nargs='+', type=float, help='weights of loss, the last element effect when adv train is True')\n\n parser.add_argument(\"--rpr\", action=\"store_true\", help=\"Use a modified Transformer for Relative Position Representations\")\n parser.add_argument(\"-max_sequence\", type=int, default=2048, help=\"Maximum midi sequence to consider\")\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n\n parser.add_argument(\"-dropout\", type=float, default=0.1, help=\"Dropout rate\")\n\n parser.add_argument(\"--metrics\", default=False, help=\"evaluate TER(token error rate)\")\n\n return parser.parse_args()" }, { "identifier": "print_train_args", "path": "utilities/argument_funcs.py", "snippet": "def print_train_args(args):\n\n print(SEPERATOR)\n print(\"input_dir:\", args.input_dir)\n print(\"output_dir:\", args.output_dir)\n print(\"weight_modulus:\", args.weight_modulus)\n print(\"print_modulus:\", args.print_modulus)\n print(\"\")\n print(\"n_workers:\", args.n_workers)\n print(\"force_cpu:\", args.force_cpu)\n print(\"tensorboard:\", not args.no_tensorboard)\n print(\"\")\n print(\"continue_weights:\", args.continue_weights)\n print(\"continue_epoch:\", args.continue_epoch)\n print(\"\")\n print(\"lr:\", args.lr)\n print(\"ce_smoothing:\", args.ce_smoothing)\n print(\"batch_size:\", args.batch_size)\n print(\"epochs:\", args.epochs)\n print(\"\")\n print(\"rpr:\", args.rpr)\n print(\"max_sequence:\", args.max_sequence)\n print(\"n_layers:\", args.n_layers)\n print(\"num_heads:\", args.num_heads)\n print(\"d_model:\", args.d_model)\n print(\"\")\n print(\"dim_feedforward:\", args.dim_feedforward)\n print(\"dropout:\", args.dropout)\n print(SEPERATOR)\n print(\"\")" }, { "identifier": "write_model_params", "path": "utilities/argument_funcs.py", "snippet": "def write_model_params(args, output_file):\n\n o_stream = open(output_file, \"w\")\n\n o_stream.write(\"rpr: \" + str(args.rpr) + \"\\n\")\n o_stream.write(\"lr: \" + str(args.lr) + \"\\n\")\n o_stream.write(\"ce_smoothing: \" + str(args.ce_smoothing) + \"\\n\")\n o_stream.write(\"batch_size: \" + str(args.batch_size) + \"\\n\")\n o_stream.write(\"max_sequence: \" + str(args.max_sequence) + \"\\n\")\n o_stream.write(\"n_layers: \" + str(args.n_layers) + \"\\n\")\n o_stream.write(\"num_heads: \" + str(args.num_heads) + \"\\n\")\n o_stream.write(\"d_model: \" + str(args.d_model) + \"\\n\")\n o_stream.write(\"dim_feedforward: \" + str(args.dim_feedforward) + \"\\n\")\n o_stream.write(\"dropout: \" + 
str(args.dropout) + \"\\n\")\n\n o_stream.close()" }, { "identifier": "train_epoch", "path": "utilities/run_model.py", "snippet": "def train_epoch(cur_epoch, model, dataloader, loss, opt, lr_scheduler=None, print_modulus=1):\n\n args = parse_train_args()\n\n model.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n\n opt.zero_grad()\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cpu()\n tgt[i][j] = tgt[i][j].cpu()\n tgt = tgt[0][0]\n tgt = tgt.flatten()\n # with torch.no_grad():\n # y1 = model(x[1])\n # y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)\n # loss1 = loss.forward(y1, tgt)\n y2 = model(x[0])\n # y3 = model(x[2])\n # train for only CT\n # y = model(x)\n\n y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)\n loss2 = loss.forward(y2, tgt)\n # tgt = tgt.flatten()\n # add scheduled sampling\n # out = loss.forward(y, tgt)\n\n loss2.backward()\n # out = args.loss[0] * loss1 + args.loss[1] * loss2\n\n opt.step()\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n\n if (batch_num + 1) % print_modulus == 0:\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num + 1, \"/\", len(dataloader), \"LR:\", get_lr(opt),\n \"Train total loss:\", float(loss2),\n \"Time (s):\", time_took)\n\n return" }, { "identifier": "train_with_adv", "path": "utilities/run_model.py", "snippet": "def train_with_adv(cur_epoch, model, model_disc, dataloader, loss, opt, opt_disc,\n lr_scheduler=None, lr_disc_scheduler=None, print_modulus=1):\n\n args = parse_train_args()\n out = -1\n start_epoch = 5\n model.train()\n model_disc.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n\n opt.zero_grad()\n opt_disc.zero_grad()\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cpu()\n tgt[i][j] = tgt[i][j].cpu()\n tgt = tgt[0][0]\n tgt = tgt.flatten()\n with torch.no_grad():\n y1 = model.module(x[1][0], x[1][1], x[1][2])\n y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)\n loss1 = loss.forward(y1, tgt)\n y2 = model.module(x[0][0], x[0][1], x[0][2])\n # discriminator model loss:\n if args.gpu[0] != -1:\n real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1).to(args.gpu[0])\n fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1).to(args.gpu[0])\n else:\n real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1)\n fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1)\n\n softmax = nn.Softmax(dim=-1)\n d_fake_loss, d_fake_logits = model_disc(torch.argmax(softmax(y2), dim=-1), 
fake_disc_label)\n d_real_loss, d_real_logits = model_disc(batch[1][0][0], real_disc_label)\n loss3 = d_fake_loss + d_real_loss\n # y3 = model(x[2])\n # train for only CT\n # y = model(x)\n\n y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)\n loss2 = loss.forward(y2, tgt)\n # tgt = tgt.flatten()\n # add scheduled sampling\n # out = loss.forward(y, tgt)\n\n # out = loss3\n out = args.loss[0] * loss1 + args.loss[1] * loss2 + args.loss[2] * loss3\n\n out.backward()\n opt.step()\n opt_disc.step()\n if lr_scheduler is not None:\n lr_scheduler.step()\n if lr_disc_scheduler is not None:\n lr_disc_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n\n if (batch_num + 1) % print_modulus == 0:\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num + 1, \"/\", len(dataloader), \"LR:\", get_lr(opt_disc),\n \"Train total loss:\", float(out), \"Train loss1:\", float(loss1), \"Train loss2:\", float(loss2),\n \"Train loss3:\", float(loss3), \"Time (s):\", time_took)\n\n return" }, { "identifier": "eval_model", "path": "utilities/run_model.py", "snippet": "def eval_model(model, dataloader, loss):\n\n model.eval()\n args = parse_train_args()\n avg_acc = -1\n avg_loss = -1\n with torch.set_grad_enabled(False):\n n_test = len(dataloader)\n sum_loss = 0.0\n sum_acc = 0.0\n for batch in tqdm.tqdm(dataloader):\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n x[i] = x[i].cpu()\n tgt[i] = tgt[i].cpu()\n tgt = tgt[0][0]\n tgt = tgt.flatten()\n\n # with torch.no_grad():\n # y1 = model(x[0])\n # y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)\n # loss1 = loss.forward(y1, tgt)\n y2 = model.module(x[0][0], x[0][1], x[0][2])\n y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)\n loss2 = loss.forward(y2, tgt)\n out = loss2\n\n sum_acc += float(compute_jsf_accuracy(y2, tgt))\n\n # y = y.reshape(y.shape[0] * y.shape[1], -1)\n # tgt = tgt.flatten()\n\n # out = loss.forward(y, tgt)\n\n sum_loss += float(out)\n\n avg_loss = sum_loss / n_test\n avg_acc = sum_acc / n_test\n\n return avg_loss, avg_acc" }, { "identifier": "get_metrics", "path": "utilities/run_model.py", "snippet": "def get_metrics(model, dataloader):\n \"\"\"\n Calculate TER: token error rate\n \"\"\"\n args = parse_eval_args()\n model.eval()\n # TER\n with torch.set_grad_enabled(False):\n n_test = len(dataloader)\n c_acc, Ns_acc, Bs_acc, Na_acc, Ba_acc, Nt_acc, Bt_acc, Nb_acc, Bb_acc = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n ter = []\n for batch in tqdm.tqdm(dataloader):\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cpu()\n tgt[i][j] = tgt[i][j].cpu()\n\n y = model.module(x[0][0], x[0][1], x[0][2])\n # TER\n 
ter.append(compute_jsf_ter(y, tgt))\n\n for i in ter:\n c_acc += i[0]\n Bs_acc += i[1]\n Ns_acc += i[2]\n Ba_acc += i[3]\n Na_acc += i[4]\n Bt_acc += i[5]\n Nt_acc += i[6]\n Bb_acc += i[7]\n Nb_acc += i[8]\n TER = [c_acc / n_test, Bs_acc / n_test, Ns_acc / n_test, Ba_acc / n_test, Na_acc / n_test,\n Bt_acc / n_test, Nt_acc / n_test, Bb_acc / n_test, Nb_acc / n_test]\n # clear nan , or np.mean will only be nan if one is nan\n return TER" }, { "identifier": "train_with_pure_transformer", "path": "utilities/run_model.py", "snippet": "def train_with_pure_transformer(cur_epoch, model, dataloader, loss, opt, lr_scheduler=None, print_modulus=1):\n\n args = parse_train_args()\n\n model.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n\n opt.zero_grad()\n\n x = batch[0][0][0].to(args.gpu[0])\n tgt = batch[1][0][0].to(args.gpu[0])\n\n y = model(x)\n\n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n\n out = loss.forward(y, tgt)\n\n out.backward()\n opt.step()\n\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n\n if (batch_num + 1) % print_modulus == 0:\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num + 1, \"/\", len(dataloader), \"LR:\", get_lr(opt),\n \"Train loss:\", float(out), \"Time (s):\", time_took)\n\n return" }, { "identifier": "params", "path": "utilities/run_model.py", "snippet": "def params(dataloader, model, model_disc):\n\n args = parse_train_args()\n model.eval()\n for batch_num, batch in enumerate(dataloader):\n flops, params = profile(model.module, (batch[0][0][0].cuda(args.gpu[0]),\n batch[0][0][1].cuda(args.gpu[0]),\n batch[0][0][2].cuda(args.gpu[0]))\n )\n print('flops:', flops, 'params:', params)\n break" } ]
import os import csv import shutil import torch import torch.nn as nn import pickle from thop import profile from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from torch.optim import Adam from dataset.jsf import create_jsf_datasets from model.CoCoFormer import CoCoformer, Discriminator, PureTransformer from model.loss import SmoothCrossEntropyLoss from utilities.constants import * from utilities.device import get_device, use_cuda from utilities.lr_scheduling import LrStepTracker, get_lr from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params from utilities.run_model import train_epoch, train_with_adv, eval_model, get_metrics, train_with_pure_transformer, params from tensorboardX import SummaryWriter
11252
# from dataset.e_piano import create_epiano_datasets, compute_epiano_accuracy, split_train_test CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"] # Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy BASELINE_EPOCH = -1 # main def main(): """ ---------- Author: Damon Gwinn ---------- Entry point. Trains a model specified by command line arguments ---------- """ args = parse_train_args() print_train_args(args) if args.force_cpu: use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs(args.output_dir, exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, "results") os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if args.no_tensorboard: tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) ##### Datasets ##### # train_dataset, val_dataset, test_dataset = create_epiano_datasets(args.input_dir, args.max_sequence) train_dataset, test_dataset = create_jsf_datasets(args.input_dir, args.max_sequence) train_loader = DataLoader(train_dataset, batch_size=args.batch_size * len(args.gpu), num_workers=args.n_workers, shuffle=True) # val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers) ##### read word2event event2word f = open(args.word2event, 'rb') word2event = pickle.load(f) # reverse the vector event2word event2word = {} for key, val in word2event.items(): event2word[val] = key if args.only_Transformer: model = PureTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) else: model = CoCoformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) model_disc = Discriminator() if args.gpu[0] != -1: model = torch.nn.DataParallel(model, device_ids=args.gpu) model = model.cuda(device=args.gpu[0]) model_disc = torch.nn.DataParallel(model_disc, device_ids=args.gpu) model_disc = model_disc.cuda(device=args.gpu[0]) params(train_loader, model, model_disc) ##### Continuing from previous training session ##### start_epoch = BASELINE_EPOCH if args.continue_weights is not None: if args.continue_epoch is None: print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights") return else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif args.continue_epoch is not None: print("ERROR: Need continue weights (-continue_weights) when using continue_epoch") return ##### Lr Scheduler vs static lr ##### if args.lr is None: if 
args.continue_epoch is None: init_step = 0 else: init_step = args.continue_epoch * len(train_loader) lr = LR_DEFAULT_START * len(args.gpu) lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step) else: lr = args.lr ##### Not smoothing evaluation loss ##### eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD) ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training ##### if args.ce_smoothing is None: train_loss_func = eval_loss_func else:
# from dataset.e_piano import create_epiano_datasets, compute_epiano_accuracy, split_train_test CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"] # Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy BASELINE_EPOCH = -1 # main def main(): """ ---------- Author: Damon Gwinn ---------- Entry point. Trains a model specified by command line arguments ---------- """ args = parse_train_args() print_train_args(args) if args.force_cpu: use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs(args.output_dir, exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, "results") os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if args.no_tensorboard: tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) ##### Datasets ##### # train_dataset, val_dataset, test_dataset = create_epiano_datasets(args.input_dir, args.max_sequence) train_dataset, test_dataset = create_jsf_datasets(args.input_dir, args.max_sequence) train_loader = DataLoader(train_dataset, batch_size=args.batch_size * len(args.gpu), num_workers=args.n_workers, shuffle=True) # val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers) ##### read word2event event2word f = open(args.word2event, 'rb') word2event = pickle.load(f) # reverse the vector event2word event2word = {} for key, val in word2event.items(): event2word[val] = key if args.only_Transformer: model = PureTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) else: model = CoCoformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) model_disc = Discriminator() if args.gpu[0] != -1: model = torch.nn.DataParallel(model, device_ids=args.gpu) model = model.cuda(device=args.gpu[0]) model_disc = torch.nn.DataParallel(model_disc, device_ids=args.gpu) model_disc = model_disc.cuda(device=args.gpu[0]) params(train_loader, model, model_disc) ##### Continuing from previous training session ##### start_epoch = BASELINE_EPOCH if args.continue_weights is not None: if args.continue_epoch is None: print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights") return else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif args.continue_epoch is not None: print("ERROR: Need continue weights (-continue_weights) when using continue_epoch") return ##### Lr Scheduler vs static lr ##### if args.lr is None: if 
args.continue_epoch is None: init_step = 0 else: init_step = args.continue_epoch * len(train_loader) lr = LR_DEFAULT_START * len(args.gpu) lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step) else: lr = args.lr ##### Not smoothing evaluation loss ##### eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD) ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training ##### if args.ce_smoothing is None: train_loss_func = eval_loss_func else:
train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD)
4
2023-11-01 08:33:08+00:00
16k
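Each row above pairs cross-file context snippets with a cropped code prompt and the single next_line to be predicted. As a rough, non-authoritative illustration of how such rows could be consumed, the Python sketch below shows one way to load the records and assemble next-line-prediction prompts. It assumes the rows are stored one JSON object per line with the field names used here (context, import_statement, cropped_code, next_line, gold_snippet_index); the file name train.jsonl and the helper names load_records and build_prompt are hypothetical and not part of the dataset.

import json

def load_records(path):
    # Hypothetical loader: assumes one JSON object per line with the
    # field names seen in these rows (context, import_statement,
    # cropped_code, next_line, gold_snippet_index).
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line.strip():
                yield json.loads(line)

def build_prompt(record, use_gold_snippet=True):
    # Assemble a next-line-prediction prompt: optionally prepend the
    # gold context snippet, then the import statement, then the cropped code.
    parts = []
    context = record.get("context") or []
    if use_gold_snippet and context:
        idx = min(record.get("gold_snippet_index", 0), len(context) - 1)
        parts.append(context[idx]["snippet"])
    parts.append(record["import_statement"])
    parts.append(record["cropped_code"])
    return "\n".join(parts)

if __name__ == "__main__":
    # "train.jsonl" is a placeholder path, not an actual file shipped with this dataset.
    for rec in load_records("train.jsonl"):
        prompt = build_prompt(rec)
        target = rec["next_line"]
        print(f"prompt chars: {len(prompt)}, target: {target[:60]!r}")
        break

In this sketch the gold_snippet_index selects which context snippet to prepend; an actual evaluation harness might instead concatenate several snippets or none, depending on the context-length bucket (the level field) of the row.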
tiendatnguyen-vision/Orbit-symmetrize
RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/representation.py
[ { "identifier": "Group", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/groups.py", "snippet": "class Group(nn.Module):\n \"\"\" Abstract Group Object which new groups should inherit from. \"\"\"\n\n def __init__(self):\n super().__init__()\n self.lie_algebra = NotImplemented # The continuous generators\n self.discrete_generators = NotImplemented # The discrete generators\n self.z_scale = None # For scale noise for sampling elements\n self.is_orthogonal = None\n self.is_permutation = None\n self.d = NotImplemented # The dimension of the base representation\n self.device = torch.device('cpu')\n self.args = None\n\n def init(self, *args):\n \"\"\" Initialize the group object. \"\"\"\n # get the dimension of the base group representation\n if self.d is NotImplemented:\n if (self.lie_algebra is not NotImplemented) and \\\n len(self.lie_algebra) > 0:\n self.d = self.lie_algebra[0].size(-1)\n if (self.discrete_generators is not NotImplemented) and \\\n len(self.discrete_generators) > 0:\n self.d = self.discrete_generators[0].size(-1)\n\n self.args = args\n\n if self.lie_algebra is NotImplemented:\n self.lie_algebra = torch.zeros((0, self.d, self.d), device=self.device)\n if self.discrete_generators is NotImplemented:\n self.discrete_generators = torch.zeros((0, self.d, self.d), device=self.device)\n\n self.to(self.device)\n\n # set orthogonal flag automatically if not specified\n if self.is_permutation:\n self.is_orthogonal = True\n if self.is_orthogonal is None:\n self.is_orthogonal = True\n if len(self.lie_algebra) != 0:\n Id = torch.eye(self.d, device=self.device)\n A_dense = torch.stack([[email protected](Ai.dtype) for Ai in self.lie_algebra])\n self.is_orthogonal &= rel_err(-A_dense.transpose(2, 1), A_dense) < 1e-6\n if len(self.discrete_generators) != 0:\n Id = torch.eye(self.d, device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators])\n self.is_orthogonal &= rel_err(h_dense.transpose(2, 1)@h_dense, Id[None]) < 1e-6\n\n # set regular flag automatically if not specified\n if self.is_orthogonal and (self.is_permutation is None):\n self.is_permutation = True\n # no infinitesmal generators and all rows have one 1\n self.is_permutation &= (len(self.lie_algebra) == 0)\n if len(self.discrete_generators) != 0:\n Id = torch.eye(self.d, device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators])\n self.is_permutation &= (((h_dense-1).abs()<1e-6).long().sum(-1) == 1).all()\n\n def exp(self, A):\n \"\"\" Matrix exponential \"\"\"\n return torch.linalg.matrix_exp(A)\n\n def num_constraints(self):\n \"\"\" Number of constraints to solve for the group \"\"\"\n return len(self.lie_algebra)+len(self.discrete_generators)\n\n def sample(self):\n \"\"\"Draw a sample from the group (not necessarily Haar measure)\"\"\"\n return self.samples(1)[0]\n\n def samples(self, N):\n \"\"\" Draw N samples from the group (not necessarily Haar measure)\"\"\"\n Id = torch.eye(self.d, device=self.device)\n A_dense = torch.stack([[email protected](Ai.dtype) for Ai in self.lie_algebra]) \\\n if len(self.lie_algebra) \\\n else torch.zeros((0, self.d, self.d), device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators]) \\\n if len(self.discrete_generators) \\\n else torch.zeros((0, self.d, self.d), device=self.device)\n z = torch.randn(N, A_dense.size(0), device=self.device)\n if self.z_scale is not None:\n z *= self.z_scale\n k = torch.randint(-MAX_POWER, 
MAX_POWER+1, (N, h_dense.size(0), 3), device=self.device)\n return noise2samples(z, k, A_dense, h_dense)\n\n def check_valid_group_elems(self, g):\n \"\"\" Check that the group elements are valid \"\"\"\n return True\n\n def __str__(self):\n return repr(self)\n\n def __repr__(self):\n outstr = f\"{self.__class__}\"\n if self.args:\n outstr += '('+''.join(repr(arg) for arg in self.args)+')'\n return outstr\n\n def __eq__(self, G2): # TODO: more permissive by checking that spans are equal?\n return repr(self) == repr(G2)\n\n def __hash__(self):\n return hash(repr(self))\n\n def __lt__(self, other):\n \"\"\" For sorting purposes only \"\"\"\n return hash(self) < hash(other)\n\n def __mul__(self, other):\n return DirectProduct(self, other)\n\n def forward(self):\n \"\"\" Forward method, unused. \"\"\"\n return None\n\n def to(self, *args, **kwargs):\n \"\"\" Move the group to the specified device \"\"\"\n if isinstance(self.lie_algebra, torch.Tensor):\n self.lie_algebra = self.lie_algebra.to(*args, **kwargs)\n elif isinstance(self.lie_algebra, list):\n self.lie_algebra = [Ai.to(*args, **kwargs) for Ai in self.lie_algebra]\n if isinstance(self.discrete_generators, torch.Tensor):\n self.discrete_generators = self.discrete_generators.to(*args, **kwargs)\n elif isinstance(self.discrete_generators, list):\n self.discrete_generators = [hi.to(*args, **kwargs) for hi in self.discrete_generators]\n if self.z_scale is not None:\n self.z_scale = self.z_scale.to(*args, **kwargs)\n self.device = torch.empty(0).to(*args, **kwargs).device\n return self" }, { "identifier": "LinearOperator", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operator_base.py", "snippet": "class LinearOperator(nn.Module):\n \"\"\" Common interface for performing matrix vector products\n Many iterative methods (e.g. cg, gmres) do not need to know the\n individual entries of a matrix to solve a linear system A*x=b.\n Such solvers only require the computation of matrix vector\n products, A*v where v is a dense vector. This class serves as\n an abstract interface between iterative solvers and matrix-like\n objects.\n To construct a concrete LinearOperator, either pass appropriate\n callables to the constructor of this class, or subclass it.\n A subclass must implement either one of the methods ``_matvec``\n and ``_matmat``, and the attributes/properties ``shape`` (pair of\n integers) and ``dtype`` (may be None). It may call the ``__init__``\n on this class to have these attributes validated. Implementing\n ``_matvec`` automatically implements ``_matmat`` (using a naive\n algorithm) and vice-versa.\n Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``\n to implement the Hermitian adjoint (conjugate transpose). As with\n ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or\n ``_adjoint`` implements the other automatically. Implementing\n ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for\n backwards compatibility.\n Parameters\n ----------\n shape : tuple\n Matrix dimensions (M, N).\n matvec : callable f(v)\n Returns returns A * v.\n rmatvec : callable f(v)\n Returns A^H * v, where A^H is the conjugate transpose of A.\n matmat : callable f(V)\n Returns A * V, where V is a dense matrix with dimensions (N, K).\n dtype : dtype\n Data type of the matrix.\n rmatmat : callable f(V)\n Returns A^H * V, where V is a dense matrix with dimensions (M, K).\n Attributes\n ----------\n args : tuple\n For linear operators describing products etc. 
of other linear\n operators, the operands of the binary operation.\n ndim : int\n Number of dimensions (this is always 2)\n See Also\n --------\n aslinearoperator : Construct LinearOperators\n Notes\n -----\n The user-defined matvec() function must properly handle the case\n where v has shape (N,) as well as the (N,1) case. The shape of\n the return type is handled internally by LinearOperator.\n LinearOperator instances can also be multiplied, added with each\n other and exponentiated, all lazily: the result of these operations\n is always a new, composite LinearOperator, that defers linear\n operations to the original operators and combines the results.\n More details regarding how to subclass a LinearOperator and several\n examples of concrete LinearOperator instances can be found in the\n external project `PyLops <https://pylops.readthedocs.io>`_.\n Examples\n --------\n >>> def mv(v):\n ... return torch.tensor([2*v[0], 3*v[1]])\n ...\n >>> A = LinearOperator((2,2), matvec=mv)\n >>> A\n <2x2 _CustomLinearOperator with dtype=float64>\n >>> A.matvec(torch.ones(2))\n tensor([ 2., 3.])\n >>> A * torch.ones(2)\n tensor([ 2., 3.])\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n if cls is LinearOperator:\n # Operate as _CustomLinearOperator factory.\n return super(LinearOperator, cls).__new__(_CustomLinearOperator)\n\n obj = super(LinearOperator, cls).__new__(cls)\n if (type(obj)._matvec == LinearOperator._matvec\n and type(obj)._matmat == LinearOperator._matmat):\n warnings.warn(\"LinearOperator subclass should implement\"\n \" at least one of _matvec and _matmat.\",\n category=RuntimeWarning, stacklevel=2)\n return obj\n\n def __init__(self):\n super().__init__()\n self.ndim = 2\n self.dtype = None\n self.shape = None\n self.device = None\n\n def init(self, dtype, shape, device):\n \"\"\" Initialize this LinearOperator.\n To be called by subclasses. 
``dtype`` may be None; ``shape`` should\n be convertible to a length-2 tuple.\n Called from subclasses at the end of the __init__ routine.\n \"\"\"\n if dtype is None:\n dtype = torch.float # force float 32\n else:\n if not isinstance(dtype, torch.dtype):\n dtype = torch_dtype(dtype)\n\n shape = tuple(shape)\n if not isshape(shape):\n raise ValueError(f\"invalid shape {(shape,)} (must be 2-d)\")\n\n self.dtype = dtype\n self.shape = torch.Size(shape)\n self.device = torch_device(device)\n\n def size(self, dim=None):\n \"\"\" Return the size of this LinearOperator.\n This is a synonym for ``shape``.\n \"\"\"\n return self.shape if dim is None else self.shape[dim]\n\n def _matmat(self, V):\n \"\"\" Default matrix-matrix multiplication handler.\n Falls back on the user-defined _matvec method, so defining that will\n define matrix multiplication (though in a very suboptimal way).\n \"\"\"\n return torch.hstack([self.matvec(col.reshape(-1, 1)) for col in V.T])\n\n def _matvec(self, v):\n \"\"\" Default matrix-vector multiplication handler.\n If self is a linear operator of shape (M, N), then this method will\n be called on a shape (N,) or (N, 1) ndarray, and should return a\n shape (M,) or (M, 1) ndarray.\n This default implementation falls back on _matmat, so defining that\n will define matrix-vector multiplication as well.\n \"\"\"\n return self.matmat(v.reshape(-1, 1))\n\n def matvec(self, v):\n \"\"\" Matrix-vector multiplication.\n Performs the operation y=A*v where A is an MxN linear\n operator and v is a column vector or 1-d array.\n Parameters\n ----------\n v : {matrix, ndarray}\n An array with shape (N,) or (N,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or ndarray with shape (M,) or (M,1) depending\n on the type and shape of the x argument.\n Notes\n -----\n This matvec wraps the user-specified matvec routine or overridden\n _matvec method to ensure that y has the correct shape and type.\n \"\"\"\n M, N = self.shape\n if v.shape != (N,) and v.shape != (N, 1):\n raise ValueError('dimension mismatch')\n\n y = self._matvec(v)\n\n if v.ndim == 1:\n y = y.reshape(M)\n elif v.ndim == 2:\n y = y.reshape(M, 1)\n else:\n raise ValueError('invalid shape returned by user-defined matvec()')\n\n return y\n\n def rmatvec(self, v):\n \"\"\" Adjoint matrix-vector multiplication.\n Performs the operation y = A^H * v where A is an MxN linear\n operator and v is a column vector or 1-d array.\n Parameters\n ----------\n v : {matrix, ndarray}\n An array with shape (M,) or (M,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or ndarray with shape (N,) or (N,1) depending\n on the type and shape of the v argument.\n Notes\n -----\n This rmatvec wraps the user-specified rmatvec routine or overridden\n _rmatvec method to ensure that y has the correct shape and type.\n \"\"\"\n M, N = self.shape\n\n if v.shape != (M,) and v.shape != (M, 1):\n raise ValueError('dimension mismatch')\n\n y = self._rmatvec(v)\n\n if v.ndim == 1:\n y = y.reshape(N)\n elif v.ndim == 2:\n y = y.reshape(N, 1)\n else:\n raise ValueError('invalid shape returned by user-defined rmatvec()')\n\n return y\n\n def _rmatvec(self, v):\n \"\"\" Default implementation of _rmatvec; defers to adjoint. 
\"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n # _adjoint not overridden, prevent infinite recursion\n raise NotImplementedError\n return self.H().matvec(v)\n\n def matmat(self, V):\n \"\"\" Matrix-matrix multiplication.\n Performs the operation y=A*V where A is an MxN linear\n operator and V dense N*K matrix or ndarray.\n Parameters\n ----------\n V : {matrix, ndarray}\n An array with shape (N,K).\n Returns\n -------\n Y : {matrix, ndarray}\n A matrix or ndarray with shape (M,K) depending on\n the type of the V argument.\n Notes\n -----\n This matmat wraps any user-specified matmat routine or overridden\n _matmat method to ensure that y has the correct type.\n \"\"\"\n if V.ndim != 2:\n raise ValueError(f'expected 2-d ndarray or matrix, not {V.ndim}-d')\n\n if V.size(0) != self.size(1):\n raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')\n\n Y = self._matmat(V)\n return Y\n\n def rmatmat(self, V):\n \"\"\" Adjoint matrix-matrix multiplication.\n Performs the operation y = A^H * V where A is an MxN linear\n operator and V is a column vector or 1-d array, or 2-d array.\n The default implementation defers to the adjoint.\n Parameters\n ----------\n V : {matrix, ndarray}\n A matrix or 2D array.\n Returns\n -------\n Y : {matrix, ndarray}\n A matrix or 2D array depending on the type of the input.\n Notes\n -----\n This rmatmat wraps the user-specified rmatmat routine.\n \"\"\"\n if V.ndim != 2:\n raise ValueError(f'expected 2-d matrix, not {V.ndim}-d')\n\n if V.size(0) != self.size(0):\n raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')\n\n Y = self._rmatmat(V)\n return Y\n\n def _rmatmat(self, V):\n \"\"\" Default implementation of _rmatmat defers to rmatvec or adjoint. \"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n return torch.hstack([self.rmatvec(col.reshape(-1, 1)) for col in V.T])\n return self.H().matmat(V)\n\n def forward(self, v):\n \"\"\" Matrix-vector or matrix-matrix multiplication. 
\"\"\"\n return self*v\n\n def __mul__(self, v):\n return self.dot(v)\n\n def dot(self, v):\n \"\"\" Matrix-matrix or matrix-vector multiplication.\n Parameters\n ----------\n v : array_like\n 1-d or 2-d array, representing a vector or matrix.\n Returns\n -------\n Av : array\n 1-d or 2-d array (depending on the shape of x) that represents\n the result of applying this linear operator on x.\n \"\"\"\n if isinstance(v, LinearOperator):\n return _ProductLinearOperator(self, v)\n if torch.is_tensor(v):\n if v.ndim == 0:\n return _ScaledLinearOperator(self, v)\n if v.ndim == 1 or v.ndim == 2 and v.size(1) == 1:\n return self.matvec(v)\n if v.ndim == 2:\n return self.matmat(v)\n raise ValueError(f'expected 1-d or 2-d array or matrix, got {v}')\n\n def __matmul__(self, other):\n if isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, use '*' instead\")\n return self.__mul__(other)\n\n def __rmatmul__(self, other):\n if isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, use '*' instead\")\n return self.__rmul__(other)\n\n def __rmul__(self, x):\n if isscalar(x):\n return _ScaledLinearOperator(self, x)\n return NotImplemented\n\n def __pow__(self, p):\n if isscalar(p):\n return _PowerLinearOperator(self, p)\n return NotImplemented\n\n def __add__(self, x):\n if isinstance(x, LinearOperator):\n return _SumLinearOperator(self, x)\n if torch.is_tensor(x) and x.ndim == 2:\n return _SumLinearOperator(self, Lazy(x))\n return NotImplemented\n\n def __radd__(self, x):\n return self.__add__(x)\n\n def __neg__(self):\n return _ScaledLinearOperator(self, -1)\n\n def __sub__(self, x):\n return self.__add__(-x)\n\n def __repr__(self):\n M, N = self.shape\n if self.dtype is None:\n dtype = 'unspecified dtype'\n else:\n dtype = 'dtype=' + str(self.dtype)\n\n return f'<{M}x{N} {self.__class__.__name__} with {dtype}>'\n\n def adjoint(self):\n \"\"\" Hermitian adjoint.\n Returns the Hermitian adjoint of self, aka the Hermitian\n conjugate or Hermitian transpose. For a complex matrix, the\n Hermitian adjoint is equal to the conjugate transpose.\n Can be abbreviated self.H instead of self.adjoint().\n Returns\n -------\n A_H : LinearOperator\n Hermitian adjoint of self.\n \"\"\"\n return self._adjoint()\n\n def H(self):\n \"\"\" Hermitian adjoint. \"\"\"\n return self.adjoint()\n\n def transpose(self):\n \"\"\" Transpose this linear operator.\n Returns a LinearOperator that represents the transpose of this one.\n Can be abbreviated self.T instead of self.transpose().\n \"\"\"\n return self._transpose()\n\n def t(self):\n \"\"\" Transpose this linear operator. \"\"\"\n return self.transpose()\n\n def _adjoint(self):\n \"\"\" Default implementation of _adjoint; defers to rmatvec. \"\"\"\n return _AdjointLinearOperator(self)\n\n def _transpose(self):\n \"\"\" Default implementation of _transpose; defers to rmatvec + conj\"\"\"\n return _TransposedLinearOperator(self)\n\n def invt(self):\n \"\"\" Default implementation of inverse transpose; defers to inv + T \"\"\"\n return (self ** -1).transpose()\n\n def to_dense(self):\n \"\"\" Default implementation of to_dense which produces the dense\n matrix corresponding to the given lazy matrix. Defaults to\n multiplying by the identity \"\"\"\n return [email protected](self.size(-1), device=self.device)\n\n def to(self, device):\n \"\"\" Move this linear operator to a new device. 
\"\"\"\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "ConcatLazy", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class ConcatLazy(LinearOperator):\n \"\"\" Produces a linear operator equivalent to concatenating\n a collection of matrices Ms along axis=0 \"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n assert all(M.size(0) == Ms[0].size(0) for M in Ms),\\\n f\"Trying to concatenate matrices of different sizes {[M.shape for M in Ms]}\"\n shape = (sum(M.size(0) for M in Ms), Ms[0].size(1))\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matmat(self, V):\n return torch.cat([M@V for M in self.Ms])\n\n def _rmatmat(self, V):\n Vs = torch.chunk(V, len(self.Ms))\n return sum(Mi.t()@Vi for Mi, Vi in zip(self.Ms, Vs))\n\n def to_dense(self):\n dense_Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return torch.cat(dense_Ms)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "I", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class I(LinearOperator):\n \"\"\" Identity operator. \"\"\"\n\n def __init__(self, d, device=None):\n super().__init__()\n shape = (d, d)\n self.init(None, shape, device)\n\n def _matmat(self, V): # (c,k)\n return V\n\n def _matvec(self, v):\n return v\n\n def _adjoint(self):\n return self\n\n def invt(self):\n return self" }, { "identifier": "lazify", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def lazify(x):\n \"\"\" Convert a tensor LinearOperator. \"\"\"\n if isinstance(x, LinearOperator):\n return x\n if torch.is_tensor(x):\n return Lazy(x)\n raise NotImplementedError" }, { "identifier": "densify", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def densify(x):\n \"\"\" Convert a LinearOperator to a dense tensor. \"\"\"\n if isinstance(x, LinearOperator):\n return x.to_dense()\n if torch.is_tensor(x):\n return x\n raise NotImplementedError" }, { "identifier": "LazyJVP", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyJVP(LinearOperator):\n \"\"\" Lazy Jacobian-vector product. \"\"\"\n\n def __init__(self, operator_fn, X, TX):\n super().__init__()\n self.operator_fn = operator_fn\n self.X = X\n self.TX = TX\n self.init(torch.float, operator_fn(X).shape, X.device)\n self.to(self.device)\n\n def vjp(self, v):\n \"\"\" Computes the vector-Jacobian product \"\"\"\n return torch.autograd.functional.jvp(\n lambda x: self.operator_fn(x)@v, [self.X], [self.TX])[1]\n\n def vjp_T(self, v):\n \"\"\" Computes the vector-Jacobian product \"\"\"\n return torch.autograd.functional.jvp(\n lambda x: self.operator_fn(x).t()@v, [self.X], [self.TX])[1]\n\n def _matmat(self, V):\n return self.vjp(V)\n\n def _matvec(self, v):\n return self.vjp(v)\n\n def _rmatmat(self, V):\n return self.vjp_T(V)\n\n def to(self, device):\n self.X = self.X.to(device)\n self.TX = self.TX.to(device)\n self.device = self.X.device\n return self" }, { "identifier": "LazyPerm", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyPerm(LinearOperator):\n \"\"\" Lazy permutation. 
\"\"\"\n\n def __init__(self, perm):\n super().__init__()\n self.perm = perm\n shape = (len(perm), len(perm))\n self.init(None, shape, perm.device)\n\n def _matmat(self, V):\n return V[self.perm]\n\n def _matvec(self, v):\n return v[self.perm]\n\n def _adjoint(self):\n return LazyPerm(torch.argsort(self.perm))\n\n def invt(self):\n return self\n\n def to(self, device):\n self.perm = self.perm.to(device)\n self.device = self.perm.device\n return self" }, { "identifier": "LazyDirectSum", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyDirectSum(LinearOperator):\n \"\"\" Lazy direct sum. \"\"\"\n\n def __init__(self, Ms, multiplicities=None):\n super().__init__()\n self.Ms = Ms\n self.multiplicities = [1 for _ in Ms] if multiplicities is None else multiplicities\n shape = (sum(Mi.size(0)*c for Mi, c in zip(Ms, multiplicities)),\n sum(Mi.size(0)*c for Mi, c in zip(Ms, multiplicities)))\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return lazy_direct_matmat(v, self.Ms, self.multiplicities)\n\n def _matmat(self, V): # (n,k)\n return lazy_direct_matmat(V, self.Ms, self.multiplicities)\n\n def _adjoint(self):\n return LazyDirectSum([Mi.t() for Mi in self.Ms])\n\n def invt(self):\n return LazyDirectSum([M.invt() for M in self.Ms])\n\n def to_dense(self):\n Ms_all = [M for M, c in zip(self.Ms, self.multiplicities)\n for _ in range(c)]\n Ms_all = [Mi.to_dense() if isinstance(Mi, LinearOperator)\n else Mi for Mi in Ms_all]\n return torch.block_diag(*Ms_all)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "LazyKron", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyKron(LinearOperator):\n \"\"\" Lazy tensor product. \"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n shape = product([Mi.size(0) for Mi in Ms]), product([Mi.size(1) for Mi in Ms])\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return self._matmat(v).reshape(-1)\n\n def _matmat(self, V):\n eV = V.reshape(*[Mi.size(-1) for Mi in self.Ms], -1)\n for i, M in enumerate(self.Ms):\n eV_front = torch.movedim(eV, i, 0)\n MeV_front = (M@eV_front.reshape(M.size(-1), -1)).reshape(M.size(0), *eV_front.shape[1:])\n eV = torch.movedim(MeV_front, 0, i)\n return eV.reshape(self.size(0), eV.size(-1))\n\n def _adjoint(self):\n return LazyKron([Mi.t() for Mi in self.Ms])\n\n def invt(self):\n return LazyKron([M.invt() for M in self.Ms])\n\n def to_dense(self):\n self.to(self.device)\n Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return reduce(torch.kron, Ms)\n\n def __new__(cls, Ms):\n if len(Ms) == 1:\n return Ms[0]\n return super().__new__(cls)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "LazyKronsum", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyKronsum(LinearOperator):\n \"\"\" Lazy tensor sum. 
\"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n shape = product([Mi.size(0) for Mi in Ms]), product([Mi.size(1) for Mi in Ms])\n dtype = torch.float\n device = get_device(Ms)\n self.init(dtype, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return self._matmat(v).reshape(-1)\n\n def _matmat(self, V):\n eV = V.reshape(*[Mi.size(-1) for Mi in self.Ms], -1)\n out = 0*eV\n for i, M in enumerate(self.Ms):\n eV_front = torch.movedim(eV, i, 0)\n M, eV_front = dtype_cast(M, eV_front)\n MeV_front = (M@eV_front.reshape(M.size(-1), -1)).reshape(M.size(0), *eV_front.shape[1:])\n out, MeV_front = dtype_cast(out, MeV_front)\n out += torch.movedim(MeV_front, 0, i)\n return out.reshape(self.size(0), eV.size(-1))\n\n def _adjoint(self):\n return LazyKronsum([Mi.t() for Mi in self.Ms])\n\n def to_dense(self):\n Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return reduce(kronsum, Ms)\n\n def __new__(cls, Ms):\n if len(Ms) == 1:\n return Ms[0]\n return super().__new__(cls)\n\n # could also be implemented as follows,\n # but fusing the sum into a single linearOperator is faster\n # def lazy_kronsum(Ms):\n # n = len(Ms)\n # lprod = np.cumprod([1]+[mi.size(-1) for mi in Ms])\n # rprod = np.cumprod([1]+[mi.size(-1) for mi in reversed(Ms)])[::-1]\n # return reduce(lambda a,b: a+b,[lazy_kron([I(lprod[i]),Mi,I(rprod[i+1])])\n # for i,Mi in enumerate(Ms)])\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "lazy_direct_matmat", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def lazy_direct_matmat(v, Ms, mults):\n \"\"\" Computes the matrix-vector product of a direct sum of matrices\n with a vector. \"\"\"\n k = v.size(1) if len(v.shape) > 1 else 1\n i = 0\n y = []\n for M, multiplicity in zip(Ms, mults):\n i_end = i+multiplicity*M.size(-1)\n elems = M@v[i:i_end][None].reshape(k*multiplicity, M.size(-1)).t()\n y.append(elems.t().reshape(k, multiplicity*M.size(0)).t())\n i = i_end\n y = torch.cat(y) # concatenate over rep axis\n return y" }, { "identifier": "product", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def product(c):\n \"\"\" Product of a list of numbers. \"\"\"\n return reduce(lambda a, b: a*b, c)" }, { "identifier": "orthogonal_complement", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/utils.py", "snippet": "def orthogonal_complement(proj):\n \"\"\" Computes the orthogonal complement to a given matrix proj\"\"\"\n _, S, Vh = torch.linalg.svd(proj, full_matrices=True)\n rank = (S > 1e-5).sum()\n return Vh[rank:].conj().t()" }, { "identifier": "krylov_constraint_solve", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/utils.py", "snippet": "def krylov_constraint_solve(C, tol=1e-5):\n \"\"\" Computes the solution basis Q for the linear constraint CQ=0 and QᵀQ=I\n up to specified tolerance with C expressed as a LinearOperator. 
\"\"\"\n r = 5\n if C.size(0)*r*2 > 2e9:\n raise RuntimeError(f\"Solns for contraints {C.shape} too large to fit in memory\")\n found_rank = 5\n while found_rank == r:\n r *= 2 # Iterative doubling of rank until large enough to include the full solution space\n if C.size(0)*r > 2e9:\n logging.error(\"Hit memory limits, switching to \"\n \"sample equivariant subspace of size %r\", found_rank)\n break\n Q = krylov_constraint_solve_upto_r(C, r, tol)\n found_rank = Q.size(-1)\n return Q" }, { "identifier": "get_device", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/utils.py", "snippet": "def get_device(operators, devices=None):\n \"\"\" Returns the device of the first operator that has a device attribute. \"\"\"\n if devices is None:\n devices = []\n for obj in operators:\n if obj is not None and hasattr(obj, 'device') and obj.device.type != 'cpu':\n return obj.device\n return torch.device('cpu')" } ]
import math import logging import itertools import torch from functools import lru_cache as cache, reduce from collections import defaultdict from plum import dispatch from torch import nn from ..groups import Group from .linear_operator_base import LinearOperator from .linear_operators import ConcatLazy, I, lazify, densify, LazyJVP, LazyPerm, \ LazyDirectSum, LazyKron, LazyKronsum, lazy_direct_matmat, product from .utils import orthogonal_complement, krylov_constraint_solve, get_device
11277
Scalar = ScalarRep() def T(p, q=0, G=None): """ A convenience function for creating rank (p,q) tensors.""" return (V**p*V.t()**q)(G) def bilinear_weights(out_rep, in_rep): """ Bilinear weights for a linear operator from in_rep to out_rep. """ # TODO: replace lazy_projection function with LazyDirectSum LinearOperator W_rep, W_perm = (in_rep >> out_rep).canonicalize() # TODO: possible bug when in_rep and out_rep are both non sumreps? investigate inv_perm = torch.argsort(W_perm) mat_shape = out_rep.size(), in_rep.size() x_rep = in_rep W_multiplicities = W_rep.reps x_multiplicities = x_rep.reps x_multiplicities = {rep: n for rep, n in x_multiplicities.items() if rep != Scalar} def nelems(nx, rep): return min(nx, rep.size()) active_dims = sum(W_multiplicities.get(rep, 0)*nelems(n, rep) for rep, n in x_multiplicities.items()) reduced_indices_dict = {rep: ids[torch.randint( len(ids), size=(nelems(len(ids), rep),))].reshape(-1) for rep, ids in x_rep.as_dict(torch.arange(x_rep.size())).items()} # Apply the projections for each rank, concatenate, and permute back to orig rank order # (r,), (*c) # TODO: find out why backwards of this function is so slow def lazy_projection(params, x): bshape = x.shape[:-1] x = x.reshape(-1, x.size(-1)) bs = x.size(0) i = 0 Ws = [] for rep, W_mult in W_multiplicities.items(): if rep not in x_multiplicities: Ws.append(torch.zeros((bs, W_mult*rep.size()), device=x.device)) continue x_mult = x_multiplicities[rep] n = nelems(x_mult, rep) i_end = i+W_mult*n bids = reduced_indices_dict[rep] bilinear_params = params[i:i_end].reshape(W_mult, n) # bs,nK-> (nK,bs) i = i_end # (bs,W_mult,d^r) = (W_mult,n)@(n,d^r,bs) bilinear_elems = bilinear_params@x[..., bids].t().reshape(n, rep.size()*bs) bilinear_elems = bilinear_elems.reshape(W_mult*rep.size(), bs).t() Ws.append(bilinear_elems) Ws = torch.cat(Ws, axis=-1) # concatenate over rep axis # reorder to original rank ordering return Ws[..., inv_perm].reshape(*bshape, *mat_shape) return active_dims, lazy_projection class SumRep(Rep): """ A sum of representations, e.g. V+V.T. """ def __init__(self, *reps, extra_perm=None, skip_init=False): """ Constructs a tensor type based on a list of tensor ranks and possibly the symmetry generators gen.""" super().__init__() if skip_init: return # Integers can be used as shorthand for scalars. 
reps = [SumRepFromCollection({Scalar: rep}) if isinstance(rep, int) else \ rep for rep in reps] # Get reps and permutations reps, perms = zip(*[rep.canonicalize() for rep in reps]) rep_counters = [rep.reps if isinstance(rep, SumRep) else {rep: 1} for rep in reps] # Combine reps and permutations: ∑_a + ∑_b = ∑_{a∪b} self.reps, perm = self.compute_canonical(rep_counters, perms) self.perm = extra_perm[perm] if extra_perm is not None else perm self.invperm = torch.argsort(self.perm) self.canonical = (self.perm == torch.arange(len(self.perm))).all() self.is_permutation = all(rep.is_permutation for rep in self.reps.keys()) def size(self): return sum(rep.size()*count for rep, count in self.reps.items()) def rho(self, M): rhos = [rep.rho(M) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(rhos, multiplicities)@LazyPerm(self.perm) def drho(self, A): drhos = [rep.drho(A) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(drhos, multiplicities)@LazyPerm(self.perm) def __eq__(self, other): return self.reps == other.reps and (self.perm == other.perm).all() def __hash__(self): assert self.canonical return hash(tuple(self.reps.items())) def t(self): """ only swaps to adjoint representation, does not reorder elems""" return SumRep(*[rep.t() for rep, c in self.reps.items() for _ in range(c)], extra_perm=self.perm) def __repr__(self): return "+".join(f"{count if count > 1 else ''}{repr(rep)}" for rep, count in self.reps.items()) def canonicalize(self): """Returns a canonically ordered rep with order np.arange(self.size()) and the permutation which achieves that ordering""" return SumRepFromCollection(self.reps), self.perm def forward(self, G): return SumRepFromCollection({rep(G): c for rep, c in self.reps.items()}, perm=self.perm) def concrete(self): return True def equivariant_basis(self): """ Overrides default implementation with a more efficient version which decomposes the constraints across the sum.""" Qs = {rep: rep.equivariant_basis() for rep in self.reps}
""" The base Representation class. """ class Rep(nn.Module): """ The base Representation class. Representation objects formalize the vector space V on which the group acts, the group representation matrix ρ(g), and the Lie Algebra representation dρ(A) in a single object. Representations act as types for vectors coming from V. These types can be manipulated and transformed with the built in operators ⊕,⊗,dual, as well as incorporating custom representations. Representation objects should be immutable. At minimum, new representations need to implement ``rho``, ``__str__``.""" def __init__(self): super().__init__() self.is_permutation = False self._size = None self.G = None def rho(self, M): """ Group representation of the matrix M of shape (d,d)""" raise NotImplementedError def drho(self, A): """ Lie Algebra representation of the matrix A of shape (d,d)""" In = torch.eye(A.size(0), dtype=A.dtype, device=A.device) return LazyJVP(self.rho, In, A) def forward(self, G): """ Instantiate (nonconcrete) representation with a symmetry group (forward) """ raise NotImplementedError def __str__(self): return repr(self) def __repr__(self): raise NotImplementedError def __eq__(self, other): if type(self) is not type(other): # pylint: disable=unidiomatic-typecheck return False return self.__hash__() == other.__hash__() def __hash__(self): raise NotImplementedError def size(self): """ Dimension dim(V) of the representation """ if self._size is not None: return self._size if self.concrete() and isinstance(self.G, Group): self._size = self.rho(self.G.sample()).size(-1) return self._size raise NotImplementedError def canonicalize(self): """ An optional method to convert the representation into a canonical form in order to reuse equivalent solutions in the solver. Should return both the canonically ordered representation, along with a permutation which can be applied to vectors of the current representation to achieve that ordering. """ # return canonicalized rep return self, torch.arange(self.size()) def rho_dense(self, M): """ A convenience function which returns rho(M) as a dense matrix.""" return densify(self.rho(M)) def drho_dense(self, A): """ A convenience function which returns drho(A) as a dense matrix.""" return densify(self.drho(A)) def constraint_matrix(self): """ Constructs the equivariance constrant matrix (lazily) by concatenating the constraints (ρ(hᵢ)-I) for i=1,...M and dρ(Aₖ) for k=1,..,D from the generators of the symmetry group. """ n = self.size() constraints = [] constraints.extend([lazify(self.rho(h)).to(self.G.device)-I(n, device=self.G.device) \ for h in self.G.discrete_generators]) constraints.extend([lazify(self.drho(A)).to(self.G.device) for A in self.G.lie_algebra]) return ConcatLazy(constraints) if constraints else lazify( torch.zeros((1, n), device=self.G.device)) solcache = {} def equivariant_basis(self): """ Computes the equivariant solution basis for the given representation of size N. Canonicalizes problems and caches solutions for reuse. 
Output [Q (N,r)] """ if self == Scalar: return torch.ones((1, 1), device=self.G.device) canon_rep, perm = self.canonicalize() invperm = torch.argsort(perm) if canon_rep not in self.solcache: logging.info("%r cache miss", canon_rep) logging.info("Solving basis for %r%s", self, f", for G={self.G}" if self.G is not None else "") C_lazy = canon_rep.constraint_matrix() if C_lazy.size(0)*C_lazy.size(1) > 3e7: # Too large to use SVD result = krylov_constraint_solve(C_lazy) else: C_dense = C_lazy.to_dense() result = orthogonal_complement(C_dense) self.solcache[canon_rep] = result return self.solcache[canon_rep][invperm] def equivariant_projector(self): """ Computes the (lazy) projection matrix P=QQᵀ that projects to the equivariant basis.""" Q = self.equivariant_basis() Q_lazy = lazify(Q) P = Q_lazy@Q_lazy.H() return P def concrete(self): """ Concreteness """ return isinstance(self.G, Group) def __add__(self, other): """ Direct sum (⊕) of representations. """ if isinstance(other, int): if other == 0: return self return self+other*Scalar if both_concrete(self, other): return SumRep(self, other) return DeferredSumRep(self, other) def __radd__(self, other): if isinstance(other, int): if other == 0: return self return other*Scalar+self return NotImplemented def __mul__(self, other): """ Tensor sum (⊗) of representations. """ return mul_reps(self, other) def __rmul__(self, other): return mul_reps(other, self) def __pow__(self, other): """ Iterated tensor product. """ assert isinstance(other, int), \ f"Power only supported for integers, not {type(other)}" assert other >= 0, f"Negative powers {other} not supported" return reduce(lambda a, b: a*b, other*[self], Scalar) def __rshift__(self, other): """ Linear maps from self -> other """ return other*self.t() def __lshift__(self, other): """ Linear maps from other -> self """ return self*other.t() def __lt__(self, other): """ less than defined to disambiguate ordering multiple different representations. Canonical ordering is determined first by Group, then by size, then by hash""" if other == Scalar: return False try: if self.G < other.G: return True if self.G > other.G: return False except (AttributeError, TypeError): pass if self.size() < other.size(): return True if self.size() > other.size(): return False return hash(self) < hash(other) # For sorting purposes only def t(self): """ Dual representation V*, rho*, drho*.""" if isinstance(self.G, Group) and self.G.is_orthogonal: return self return Dual(self) @dispatch def mul_reps(ra, rb: int): """ Product of a scalar and a representation. """ if rb == 1: return ra if rb == 0: return 0 if ra.concrete(): return SumRep(*(rb*[ra])) return DeferredSumRep(*(rb*[ra])) @dispatch def mul_reps(ra: int, rb): # pylint: disable=function-redefined """ Product of a scalar and a representation. """ return mul_reps(rb, ra) # pylint: disable=W1114:arguments-out-of-order class ScalarRep(Rep): """ The trivial representation of the group G. 
""" def __init__(self, G=None): super().__init__() self.G = G self.is_permutation = True def forward(self, G): self.G = G return self def size(self): return 1 def canonicalize(self): return self, torch.zeros(1, dtype=torch.long) def __repr__(self): return "V⁰" def t(self): return self def rho(self, M): return torch.eye(1, device=self.G.device) def drho(self, A): return 0*torch.eye(1, device=self.G.device) def __hash__(self): return 0 def __eq__(self, other): return isinstance(other, ScalarRep) def __mul__(self, other): if isinstance(other, int): return super().__mul__(other) return other def __rmul__(self, other): if isinstance(other, int): return super().__rmul__(other) return other def concrete(self): return True class Base(Rep): """ Base representation V of a group.""" def __init__(self, G=None): super().__init__() self.G = G if G is not None: self.is_permutation = G.is_permutation def forward(self, G): return self.__class__(G) def rho(self, M): if isinstance(self.G, Group) and isinstance(M, dict): M = M[self.G] return M def drho(self, A): if isinstance(self.G, Group) and isinstance(A, dict): A = A[self.G] return A def size(self): assert self.G is not None, f"must know G to find size for rep={self}" return self.G.d def __repr__(self): return "V" def __hash__(self): return hash((type(self), self.G)) def __eq__(self, other): return type(other) is type(self) and self.G == other.G def __lt__(self, other): if isinstance(other, Dual): return True return super().__lt__(other) class Dual(Rep): """ Dual representation V*, rho*, drho*.""" def __init__(self, rep): super().__init__() self.rep = rep self.G = rep.G if hasattr(rep, "is_permutation"): self.is_permutation = rep.is_permutation def forward(self, G): return self.rep(G).t() def rho(self, M): rho = self.rep.rho(M) rhoinvt = rho.invt() if isinstance(rho, LinearOperator) else torch.linalg.inv(rho).t() return rhoinvt def drho(self, A): return -self.rep.drho(A).t() def __repr__(self): return repr(self.rep)+"*" def t(self): return self.rep def __eq__(self, other): return type(other) is type(self) and self.rep == other.rep def __hash__(self): return hash((type(self), self.rep)) def __lt__(self, other): if other == self.rep: return False return super().__lt__(other) def size(self): return self.rep.size() # Alias V or Vector for an instance of the Base representation of a group V = Vector = Base() # An instance of the Scalar representation, equivalent to V**0 Scalar = ScalarRep() def T(p, q=0, G=None): """ A convenience function for creating rank (p,q) tensors.""" return (V**p*V.t()**q)(G) def bilinear_weights(out_rep, in_rep): """ Bilinear weights for a linear operator from in_rep to out_rep. """ # TODO: replace lazy_projection function with LazyDirectSum LinearOperator W_rep, W_perm = (in_rep >> out_rep).canonicalize() # TODO: possible bug when in_rep and out_rep are both non sumreps? 
investigate inv_perm = torch.argsort(W_perm) mat_shape = out_rep.size(), in_rep.size() x_rep = in_rep W_multiplicities = W_rep.reps x_multiplicities = x_rep.reps x_multiplicities = {rep: n for rep, n in x_multiplicities.items() if rep != Scalar} def nelems(nx, rep): return min(nx, rep.size()) active_dims = sum(W_multiplicities.get(rep, 0)*nelems(n, rep) for rep, n in x_multiplicities.items()) reduced_indices_dict = {rep: ids[torch.randint( len(ids), size=(nelems(len(ids), rep),))].reshape(-1) for rep, ids in x_rep.as_dict(torch.arange(x_rep.size())).items()} # Apply the projections for each rank, concatenate, and permute back to orig rank order # (r,), (*c) # TODO: find out why backwards of this function is so slow def lazy_projection(params, x): bshape = x.shape[:-1] x = x.reshape(-1, x.size(-1)) bs = x.size(0) i = 0 Ws = [] for rep, W_mult in W_multiplicities.items(): if rep not in x_multiplicities: Ws.append(torch.zeros((bs, W_mult*rep.size()), device=x.device)) continue x_mult = x_multiplicities[rep] n = nelems(x_mult, rep) i_end = i+W_mult*n bids = reduced_indices_dict[rep] bilinear_params = params[i:i_end].reshape(W_mult, n) # bs,nK-> (nK,bs) i = i_end # (bs,W_mult,d^r) = (W_mult,n)@(n,d^r,bs) bilinear_elems = bilinear_params@x[..., bids].t().reshape(n, rep.size()*bs) bilinear_elems = bilinear_elems.reshape(W_mult*rep.size(), bs).t() Ws.append(bilinear_elems) Ws = torch.cat(Ws, axis=-1) # concatenate over rep axis # reorder to original rank ordering return Ws[..., inv_perm].reshape(*bshape, *mat_shape) return active_dims, lazy_projection class SumRep(Rep): """ A sum of representations, e.g. V+V.T. """ def __init__(self, *reps, extra_perm=None, skip_init=False): """ Constructs a tensor type based on a list of tensor ranks and possibly the symmetry generators gen.""" super().__init__() if skip_init: return # Integers can be used as shorthand for scalars. 
reps = [SumRepFromCollection({Scalar: rep}) if isinstance(rep, int) else \ rep for rep in reps] # Get reps and permutations reps, perms = zip(*[rep.canonicalize() for rep in reps]) rep_counters = [rep.reps if isinstance(rep, SumRep) else {rep: 1} for rep in reps] # Combine reps and permutations: ∑_a + ∑_b = ∑_{a∪b} self.reps, perm = self.compute_canonical(rep_counters, perms) self.perm = extra_perm[perm] if extra_perm is not None else perm self.invperm = torch.argsort(self.perm) self.canonical = (self.perm == torch.arange(len(self.perm))).all() self.is_permutation = all(rep.is_permutation for rep in self.reps.keys()) def size(self): return sum(rep.size()*count for rep, count in self.reps.items()) def rho(self, M): rhos = [rep.rho(M) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(rhos, multiplicities)@LazyPerm(self.perm) def drho(self, A): drhos = [rep.drho(A) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(drhos, multiplicities)@LazyPerm(self.perm) def __eq__(self, other): return self.reps == other.reps and (self.perm == other.perm).all() def __hash__(self): assert self.canonical return hash(tuple(self.reps.items())) def t(self): """ only swaps to adjoint representation, does not reorder elems""" return SumRep(*[rep.t() for rep, c in self.reps.items() for _ in range(c)], extra_perm=self.perm) def __repr__(self): return "+".join(f"{count if count > 1 else ''}{repr(rep)}" for rep, count in self.reps.items()) def canonicalize(self): """Returns a canonically ordered rep with order np.arange(self.size()) and the permutation which achieves that ordering""" return SumRepFromCollection(self.reps), self.perm def forward(self, G): return SumRepFromCollection({rep(G): c for rep, c in self.reps.items()}, perm=self.perm) def concrete(self): return True def equivariant_basis(self): """ Overrides default implementation with a more efficient version which decomposes the constraints across the sum.""" Qs = {rep: rep.equivariant_basis() for rep in self.reps}
device = self.G.device if self.G is not None else get_device(list(Qs.values()))
15
2023-11-01 07:19:02+00:00
16k
mbreuss/consistency_trajectory_models_toy_task
ctm_train.py
[ { "identifier": "ConsistencyTrajectoryModel", "path": "ctm/ctm.py", "snippet": "class ConsistencyTrajectoryModel(nn.Module):\n\n def __init__(\n self, \n data_dim: int,\n cond_dim: int,\n sampler_type: str,\n sigma_data: float,\n sigma_min: float,\n sigma_max: float,\n conditioned: bool,\n device: str,\n use_teacher: bool = False,\n solver_type: str = 'heun',\n n_discrete_t: int = 20,\n lr: float = 1e-4,\n rho: int = 7,\n diffusion_lambda: float = 1.0,\n gan_lambda: float = 0.0,\n ema_rate: float = 0.999,\n n_sampling_steps: int = 10,\n sigma_sample_density_type: str = 'loglogistic',\n ) -> None:\n super().__init__()\n self.use_gan = False\n self.ema_rate = ema_rate\n self.diffusion_lambda = diffusion_lambda\n self.gan_lambda = gan_lambda\n self.n_discrete_t = n_discrete_t\n self.model = ConsistencyTrajectoryNetwork(\n x_dim=data_dim,\n hidden_dim=256,\n time_embed_dim=4,\n cond_dim=cond_dim,\n cond_mask_prob=0.0,\n num_hidden_layers=4,\n output_dim=data_dim,\n dropout_rate=0.1,\n cond_conditional=conditioned\n ).to(device)\n # we need an ema version of the model for the consistency loss\n self.target_model = copy.deepcopy(self.model)\n for param in self.target_model.parameters():\n param.requires_grad = False\n # we further can use a teacher model for the solver\n self.use_teacher = use_teacher\n if self.use_teacher:\n self.teacher_model = copy.deepcopy(self.model)\n self.device = device\n self.sampler_type = sampler_type\n # use the score wrapper \n self.sigma_data = sigma_data\n self.sigma_min = sigma_min\n self.sigma_max = sigma_max\n self.rho = rho\n self.n_sampling_steps = n_sampling_steps\n self.solver_type = solver_type\n self.sigma_sample_density_type = sigma_sample_density_type\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)\n self.epochs = 0\n \n def diffusion_wrapper(self, model, x, cond, t, s):\n \"\"\"\n Performs the diffusion wrapper for the given model, x, cond, and t.\n Based on the conditioning from EDM Karras et al. 
2022.\n\n Args:\n model (torch.nn.Module): The neural network model to be used for the diffusion process.\n x (torch.Tensor): The input tensor to the model.\n cond (torch.Tensor): The conditioning tensor to be used during the diffusion process.\n t (float): The time step for the diffusion process.\n\n Returns:\n torch.Tensor: The scaled output tensor after applying the diffusion wrapper to the model.\n \"\"\"\n c_skip = self.sigma_data**2 / (\n t ** 2 + self.sigma_data**2\n )\n c_out = (\n t * self.sigma_data / (t**2 + self.sigma_data**2) ** 0.5\n )\n # these two are not mentioned in the paper but they use it in their code\n c_in = 1 / (t**2 + self.sigma_data**2) ** 0.5\n \n t = 0.25 * torch.log(t + 1e-40)\n c_in = append_dims(c_in, x.ndim)\n c_out = append_dims(c_out, x.ndim)\n c_skip = append_dims(c_skip, x.ndim)\n\n diffusion_output = model(c_in * x, cond, t, s)\n scaled_output = c_out * diffusion_output + c_skip * x\n \n return scaled_output\n \n def cmt_wrapper(self, model, x, cond, t, s):\n \"\"\"\n Applies the new cmt wrapper from page 4 of https://openreview.net/attachment?id=ymjI8feDTD&name=pdf\n\n Args:\n model (torch.nn.Module): The neural network model to be used for the diffusion process.\n x (torch.Tensor): The input tensor to the model.\n cond (torch.Tensor): The conditioning tensor to be used during the diffusion process.\n t (float): The time step for the diffusion process.\n s: (float): the target noise level for the diffusion process.\n\n Returns:\n torch.Tensor: The scaled output tensor after applying the diffusion wrapper to the model.\n \"\"\"\n if len(t.shape) == 1:\n t = t.unsqueeze(1)\n if len(s.shape) == 1:\n s = s.unsqueeze(1)\n G_0 = (s / t) * x + (1 - s /t) * self.diffusion_wrapper(model, x, cond, t, s)\n \n return G_0\n \n def _update_ema_weights(self):\n \"\"\"\n Updates the exponential moving average (EMA) weights of the target model.\n\n The method performs the following steps:\n 1. Gets the state dictionary of the self.model (source model).\n 2. Updates the EMA weights for each parameter in the target model by computing the weighted average between \n the corresponding parameter in the target model and the parameter in the source model, using the EMA rate parameter.\n \"\"\"\n # Get the state dictionary of the current/source model\n state_dict = self.model.state_dict()\n # Get the state dictionary of the target model\n target_state_dict = self.target_model.state_dict()\n\n # Iterate over the parameters in the target model state dictionary\n for key in state_dict:\n if key in target_state_dict:\n # Update the EMA weights for each parameter\n target_param_data = target_state_dict[key].data\n model_param_data = state_dict[key].data\n target_state_dict[key].data.copy_((1 - self.ema_rate) * target_param_data + self.ema_rate * model_param_data)\n\n # You can optionally load the updated state dict into the target model, if necessary\n # self.target_model.load_state_dict(target_state_dict)\n\n def train_step(self, x, cond):\n \"\"\"\n Main training step method to compute the loss for the Consistency Trajectory Model.\n The loss consists of three parts: the consistency loss, the diffusion loss, and the GAN loss (optional).\n The first part is similar to Song et al. (2023) and the second part is similar to Karras et al. 
(2022).\n The GAN Part is not implemented right now, since its not attractive for Imitation Learning applications.\n \"\"\"\n self.model.train()\n t_ctm, s, u = self.sample_noise_levels(shape=(len(x),), N=self.n_discrete_t, device=self.device)\n noise = torch.randn_like(x)\n # get the noise samples\n x_t = x + noise * append_dims(t_ctm, x.ndim)\n # use the solver if we have a teacher model otherwise use the euler method\n solver_target = self.solver(x_t, cond, t_ctm, u)\n\n # compute the cmt consistency loss\n cmt_loss = self.ctm_loss(x_t, cond, t_ctm, s, u, solver_target)\n \n # compute the diffusion loss\n # sample noise for the diffusion loss from the continuous noise distribution\n if self.diffusion_lambda > 0:\n t_sm = self.make_sample_density()(shape=(len(x),), device=self.device)\n x_t_sm = x + noise * append_dims(t_sm, x.ndim)\n diffusion_loss = self.diffusion_loss(x, x_t_sm, cond, t_sm)\n else:\n diffusion_loss = 0\n # compute the GAN loss if chosen\n # not implemented yet\n if self.use_gan:\n gan_loss = self.gan_loss(x_t, cond, x_t_sm)\n else:\n gan_loss = 0\n\n # compute the total loss\n \n loss = cmt_loss + self.diffusion_lambda * diffusion_loss + self.gan_lambda * gan_loss\n \n # perform the backward pass\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n # update the ema weights\n self._update_ema_weights()\n \n return loss, cmt_loss, diffusion_loss, gan_loss\n \n def sample_noise_levels(self, shape, N, device='cpu'):\n \"\"\"\n Samples a tensor of the specified shape with noise levels \n from `N` discretized levels of the noise scheduler.\n\n Args:\n shape (tuple): Shape of the tensor to sample.\n N (int): Number of discrete noise levels to discretize the scheduler.\n device (str): Device on which to create the noise levels, 'cpu' or 'cuda'.\n\n Returns:\n torch.Tensor: Tensor containing sampled noise levels.\n \"\"\"\n # Get the N discretized noise levels\n discretized_sigmas = get_sigmas_exponential(N, self.sigma_min, self.sigma_max, self.device)\n \n # Sample indices from this discretized range\n t = torch.randint(1, N, size=shape, device=device)\n s = torch.round(torch.rand_like(t.to(torch.float32)) * t.to(torch.float32)).to(torch.int32)\n u = torch.round(torch.rand_like(t.to(torch.float32)) * (t.to(torch.float32) -1 - s.to(torch.float32))+ s).to(torch.int32)\n # Use these indices to gather the noise levels from the discretized sigmas\n sigma_t = discretized_sigmas[t]\n sigma_s = discretized_sigmas[s]\n sigma_u = discretized_sigmas[u]\n return sigma_t, sigma_s, sigma_u\n\n def solver(self, x, cond, t, s):\n \"\"\"\n Eq. (3) in the paper\n \"\"\"\n if self.use_teacher:\n solver = self.teacher_model\n else:\n solver = self.model\n\n if self.solver_type == 'euler':\n solver_pred = self.euler_update_step(solver, x, cond, t, s)\n elif self.solver_type == 'heun':\n solver_pred = self.heun_update_step(solver, x, cond, t, s)\n elif self.solver_type == 'ddim':\n solver_pred = self.ddim_update_step(solver, x, cond, t, s)\n\n return solver_pred\n\n \n def eval_step(self, x, cond):\n \"\"\"\n Eval step method to compute the loss for the action prediction.\n \"\"\"\n self.model.eval()\n self.target_model.eval()\n x = x.to(self.device)\n cond = cond.to(self.device)\n # next generate the discrete timesteps\n t = [self.sample_discrete_timesteps(i) for i in range(self.t_steps)]\n # compute the loss\n x_T = torch.randn_like(x) * self.sigma_max\n pred_x = self. 
sample(x_T, cond, t)\n loss = torch.nn.functional.mse_loss(pred_x, x)\n return loss\n \n def ctm_loss(self, x_t, cond, t, s, u, solver_target):\n \"\"\"\n # TODO add description\n\n Args:\n x (torch.Tensor): Input tensor of shape [batch_size, dim].\n cond (torch.Tensor): Conditioning tensor of shape [batch_size, cond_dim].\n t1 (torch.Tensor): First discrete timestep tensor of shape [batch_size, 1].\n t2 (torch.Tensor): Second discrete timestep tensor of shape [batch_size, 1].\n\n Returns:\n torch.Tensor: Consistency loss tensor of shape [].\n \"\"\"\n jump_target = einops.repeat(torch.tensor([0]), '1 -> (b 1)', b=len(x_t))\n # compute the cmt prediction: jump from t to s\n ctm_pred = self.cmt_wrapper(self.model, x_t, cond, t, s)\n\n # compute the cmt target prediction with ema parameters inside self.target_model: jump from u to s\n # with torch.no_grad():\n ctm_target = self.cmt_wrapper(self.target_model, solver_target, cond, u, s)\n ctm_target_clean = self.cmt_wrapper(self.target_model, ctm_target, cond, s, jump_target)\n\n # transform them into the clean data space by jumping without gradient from s to 0\n # for both predictions and comparing them in the clean data space\n # with torch.no_grad():\n ctm_pred_clean = self.cmt_wrapper(self.target_model, ctm_pred, cond, s, jump_target)\n \n # compute the cmt loss\n cmt_loss = torch.nn.functional.mse_loss(ctm_pred_clean, ctm_target_clean)\n\n return cmt_loss\n\n\n @torch.no_grad() \n def heun_update_step(self, model, x, cond, t1, t2):\n \"\"\"\n Computes a single Heun update step from the Euler sampler with the teacher model\n\n Parameters:\n x (torch.Tensor): The input tensor.\n t1 (torch.Tensor): The initial timestep.\n t2 (torch.Tensor): The final timestep.\n x0 (torch.Tensor): The ground truth value used to compute the Euler update step.\n\n Returns:\n torch.Tensor: The output tensor after taking the Euler update step.\n \"\"\"\n denoised = self.cmt_wrapper(model, x, cond, t1, t1)\n d = (x - denoised) / append_dims(t1, x.ndim)\n \n \n sample_temp = x + d * append_dims(t2 - t1, x.ndim)\n denoised_2 = self.cmt_wrapper(model, sample_temp, cond, t2, t2)\n d_2 = (sample_temp - denoised_2) / append_dims(t2, x.ndim)\n d_prime = (d + d_2) / 2\n samples = x + d_prime * append_dims(t2 - t1, x.ndim)\n \n return samples\n \n @torch.no_grad() \n def ddim_update_step(self, model, x, cond, t1, t2):\n \"\"\"\n Computes a single Heun update step from the DDIM sampler with the teacher model\n\n Parameters:\n x (torch.Tensor): The input tensor.\n t1 (torch.Tensor): The initial timestep.\n t2 (torch.Tensor): The final timestep.\n x0 (torch.Tensor): The ground truth value used to compute the Euler update step.\n\n Returns:\n torch.Tensor: The output tensor after taking the Euler update step.\n \"\"\"\n sigma_fn = lambda t: t.neg().exp()\n t_fn = lambda sigma: sigma.log().neg()\n denoised = self.cmt_wrapper(model, x, cond, t1, t1)\n \n t, t_next = t_fn(t1), t_fn(t2)\n h = append_dims(t_next - t, x.ndim)\n samples = append_dims((sigma_fn(t_next) / sigma_fn(t)), x.ndim) * x - (-h).expm1() * denoised\n \n return samples\n\n def get_diffusion_scalings(self, sigma):\n \"\"\"\n Computes the scaling factors for diffusion training at a given time step sigma.\n\n Args:\n - self: the object instance of the model\n - sigma (float or torch.Tensor): the time step at which to compute the scaling factors\n \n , where self.sigma_data: the data noise level of the diffusion process, set during initialization of the model\n\n Returns:\n - c_skip (torch.Tensor): the scaling 
factor for skipping the diffusion model for the given time step sigma\n - c_out (torch.Tensor): the scaling factor for the output of the diffusion model for the given time step sigma\n - c_in (torch.Tensor): the scaling factor for the input of the diffusion model for the given time step sigma\n\n \"\"\"\n c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2)\n c_out = sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5\n c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5\n return c_skip, c_out, c_in\n \n @staticmethod\n def mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))\n\n def diffusion_train_step(self, x, cond, train_step, max_steps):\n \"\"\"\n Computes the training loss and performs a single update step for the score-based model.\n\n Args:\n - self: the object instance of the model\n - x (torch.Tensor): the input tensor of shape (batch_size, dim)\n - cond (torch.Tensor): the conditional input tensor of shape (batch_size, cond_dim)\n\n Returns:\n - loss.item() (float): the scalar value of the training loss for this batch\n\n \"\"\"\n self.model.train()\n x = x.to(self.device)\n cond = cond.to(self.device)\n self.optimizer.zero_grad()\n t = self.make_sample_density()(shape=(len(x),), device=self.device)\n x_t = x + torch.randn_like(x) * append_dims(t, x.ndim)\n loss = self.diffusion_loss(x, x_t, cond, t)\n loss.backward()\n self.optimizer.step()\n return loss.item()\n\n \n def diffusion_loss(self, x, x_t, cond, t):\n \"\"\"\n Computes the diffusion training loss for the given model, input, condition, and time.\n\n Args:\n - self: the object instance of the model\n - x (torch.Tensor): the input tensor of shape (batch_size, channels, height, width)\n - cond (torch.Tensor): the conditional input tensor of shape (batch_size, cond_dim)\n - t (torch.Tensor): the time step tensor of shape (batch_size,)\n\n Returns:\n - loss (torch.Tensor): the diffusion training loss tensor of shape ()\n\n The diffusion training loss is computed based on the following equation from Karras et al. 
2022:\n loss = (model_output - target)^2.mean()\n where,\n - noise: a tensor of the same shape as x, containing randomly sampled noise\n - x_t: a tensor of the same shape as x, obtained by adding the noise tensor to x\n - c_skip, c_out, c_in: scaling tensors obtained from the diffusion scalings for the given time step\n - t: a tensor of the same shape as t, obtained by taking the natural logarithm of t and dividing it by 4\n - model_output: the output tensor of the model for the input x_1, condition cond, and time t\n - target: the target tensor for the given input x, scaling tensors c_skip, c_out, c_in, and time t\n \"\"\"\n c_skip, c_out, c_in = [append_dims(x, 2) for x in self.get_diffusion_scalings(t)]\n t = torch.log(t) / 4\n model_output = self.model(x_t * c_in, cond, t, t)\n target = (x - c_skip * x_t) / c_out\n return (model_output - target).pow(2).mean()\n \n def update_teacher_model(self):\n self.teacher_model.load_state_dict(self.target_model.state_dict())\n for param in self.teacher_model.parameters():\n param.requires_grad = False\n \n # next we init the model and target model with the same weights from the teacher\n self.model.load_state_dict(self.teacher_model.state_dict())\n for param in self.model.parameters():\n param.requires_grad = True\n self.target_model.load_state_dict(self.teacher_model.state_dict())\n for param in self.target_model.parameters():\n param.requires_grad = False\n print('Updated Teacher Model and froze all parameters!')\n \n def euler_update_step(self, x, t1, t2, denoised):\n \"\"\"\n Computes a single update step from the Euler sampler with a ground truth value.\n\n Parameters:\n x (torch.Tensor): The input tensor.\n t1 (torch.Tensor): The initial timestep.\n t2 (torch.Tensor): The final timestep.\n x0 (torch.Tensor): The ground truth value used to compute the Euler update step.\n\n Returns:\n torch.Tensor: The output tensor after taking the Euler update step.\n \"\"\"\n d = (x - denoised) / append_dims(t1, x.ndim)\n samples = x + d * append_dims(t2 - t1, x.ndim)\n return samples\n \n def euler_single_step(self, model, x, cond, t1, t2):\n \"\"\"\n \n \"\"\"\n denoised = self.diffusion_wrapper(model, x, cond, t1, t1)\n d = (x - denoised) / append_dims(t1, x.ndim)\n samples = x + d * append_dims(t2 - t1, x.ndim)\n return samples\n\n @torch.no_grad()\n @ema_eval_wrapper\n def sample_singlestep(self, x_shape, cond, return_seq=False):\n \"\"\"\n Samples a single step from the trained consistency trajectory model. \n If return_seq is True, returns a list of sampled tensors, \n otherwise returns a single tensor. \n \n Args:\n - x_shape (tuple): the shape of the tensor to be sampled.\n - cond (torch.Tensor or None): the conditional tensor.\n - return_seq (bool, optional): whether to return a list of sampled tensors (default False).\n \n Returns:\n - (torch.Tensor or list): the sampled tensor(s).\n \"\"\"\n sampled_x = []\n self.model.eval()\n if cond is not None:\n cond = cond.to(self.device)\n\n x = torch.randn_like(x_shape).to(self.device) * self.sigma_max\n sampled_x.append(x)\n x = self.cmt_wrapper(self.model, x, cond, torch.tensor([self.sigma_max]), torch.tensor([0]))\n sampled_x.append(x)\n if return_seq:\n return sampled_x\n else:\n return x\n \n @torch.no_grad()\n @ema_eval_wrapper\n def sample_diffusion_euler(self, x_shape, cond, n_sampling_steps=None, return_seq=False):\n \"\"\"\n Sample from the pre-trained diffusion model using the Euler method. This method is used for sanity checking \n the learned diffusion model. 
It generates a sequence of samples by taking small steps from one sample to the next. \n At each step, it generates a new noise from a normal distribution and combines it with the previous sample \n to get the next sample.\n \n Parameters:\n - x_shape (torch.Tensor): Shape of the input tensor to the model.\n - cond (torch.Tensor): Conditional information for the model.\n - n_sampling_steps (int, optional): Number of sampling steps to take. Defaults to None.\n - return_seq (bool, optional): Whether to return the full sequence of samples or just the final one. \n Defaults to False.\n \n Returns:\n - x (torch.Tensor or List[torch.Tensor]): Sampled tensor from the model. If `return_seq=True`, it returns\n a list of tensors, otherwise it returns a single tensor.\n \"\"\"\n self.model.eval()\n if cond is not None:\n cond = cond.to(self.device)\n x = torch.randn_like(x_shape).to(self.device) * self.sigma_max \n # x = torch.linspace(-4, 4, len(x_shape)).view(len(x_shape), 1).to(self.device)\n\n sampled_x = []\n if n_sampling_steps is None:\n n_sampling_steps = self.n_sampling_steps\n \n # sample the sequence of timesteps\n sigmas = self.sample_seq_timesteps(N=n_sampling_steps, type='exponential')\n sampled_x.append(x)\n # iterate over the remaining timesteps\n for i in trange(len(sigmas) - 1, disable=True):\n denoised = self.diffusion_wrapper(self.model, x, cond, sigmas[i], sigmas[i])\n x = self.euler_update_step(x, sigmas[i], sigmas[i+1], denoised)\n sampled_x.append(x)\n if return_seq:\n return sampled_x\n else:\n return x\n \n @torch.no_grad()\n @ema_eval_wrapper\n def ctm_gamma_sampler(self, x_shape, cond, gamma, n_sampling_steps=None, return_seq=False):\n \"\"\"\n Alg. 3 in the paper of CTM (page 22)\n \"\"\"\n self.model.eval()\n if cond is not None:\n cond = cond.to(self.device)\n x = torch.randn_like(x_shape).to(self.device) * self.sigma_max\n # x = torch.linspace(-4, 4, len(x_shape)).view(len(x_shape), 1).to(self.device)\n\n sampled_x = []\n if n_sampling_steps is None:\n n_sampling_steps = self.n_sampling_steps\n \n # sample the sequence of timesteps\n sigmas = self.sample_seq_timesteps(N=n_sampling_steps, type='exponential')\n sampled_x.append(x)\n # iterate over the remaining timesteps\n for i in trange(len(sigmas) - 1, disable=True):\n # get thenew sigma value \n sigma_hat = sigmas[i+1] * torch.sqrt(1 - gamma ** 2)\n # get the denoised value\n x_t_gamma = self.cmt_wrapper(self.model, x, cond, sigmas[i], sigma_hat)\n \n if sigmas[i + 1] > 0:\n x = x_t_gamma + gamma * sigmas[i+1] * torch.randn_like(x_shape).to(self.device)\n \n sampled_x.append(x)\n if return_seq:\n return sampled_x\n else:\n return x\n\n def sample_seq_timesteps(self, N=100, type='karras'):\n \"\"\"\n Generates a sequence of N timesteps for the given type.\n\n Args:\n - self: the object instance of the model\n - N (int): the number of timesteps to generate\n - type (str): the type of sequence to generate, either 'karras', 'linear', or 'exponential'\n\n Returns:\n - t (torch.Tensor): the generated sequence of timesteps of shape (N,)\n\n The method generates a sequence of timesteps for the given type using one of the following functions:\n - get_sigmas_karras: a function that generates a sequence of timesteps using the Karras et al. 
schedule\n - get_sigmas_linear: a function that generates a sequence of timesteps linearly spaced between sigma_min and sigma_max\n - get_sigmas_exponential: a function that generates a sequence of timesteps exponentially spaced between sigma_min and sigma_max\n where,\n - self.sigma_min, self.sigma_max: the minimum and maximum timesteps, set during initialization of the model\n - self.rho: the decay rate for the Karras et al. schedule, set during initialization of the model\n - self.device: the device on which to generate the timesteps, set during initialization of the model\n\n \"\"\"\n if type == 'karras':\n t = get_sigmas_karras(N, self.sigma_min, self.sigma_max, self.rho, self.device)\n elif type == 'linear':\n t = get_sigmas_linear(N, self.sigma_min, self.sigma_max, self.device)\n elif type == 'exponential':\n t = get_sigmas_exponential(N, self.sigma_min, self.sigma_max, self.device)\n else:\n raise NotImplementedError('Chosen Scheduler is implemented!')\n return t\n \n def make_sample_density(self):\n \"\"\"\n Returns a function that generates random timesteps based on the chosen sample density.\n\n Args:\n - self: the object instance of the model\n\n Returns:\n - sample_density_fn (callable): a function that generates random timesteps\n\n The method returns a callable function that generates random timesteps based on the chosen sample density.\n The available sample densities are:\n - 'lognormal': generates random timesteps from a log-normal distribution with mean and standard deviation set\n during initialization of the model also used in Karras et al. (2022)\n - 'loglogistic': generates random timesteps from a log-logistic distribution with location parameter set to the\n natural logarithm of the sigma_data parameter and scale and range parameters set during initialization\n of the model\n - 'loguniform': generates random timesteps from a log-uniform distribution with range parameters set during\n initialization of the model\n - 'uniform': generates random timesteps from a uniform distribution with range parameters set during initialization\n of the model\n - 'v-diffusion': generates random timesteps using the Variational Diffusion sampler with range parameters set during\n initialization of the model\n - 'discrete': generates random timesteps from the noise schedule using the exponential density\n - 'split-lognormal': generates random timesteps from a split log-normal distribution with mean and standard deviation\n set during initialization of the model\n \"\"\"\n sd_config = []\n \n if self.sigma_sample_density_type == 'lognormal':\n loc = self.sigma_sample_density_mean # if 'mean' in sd_config else sd_config['loc']\n scale = self.sigma_sample_density_std # if 'std' in sd_config else sd_config['scale']\n return partial(rand_log_normal, loc=loc, scale=scale)\n \n if self.sigma_sample_density_type == 'loglogistic':\n loc = sd_config['loc'] if 'loc' in sd_config else math.log(self.sigma_data)\n scale = sd_config['scale'] if 'scale' in sd_config else 0.5\n min_value = sd_config['min_value'] if 'min_value' in sd_config else self.sigma_min\n max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max\n return partial(rand_log_logistic, loc=loc, scale=scale, min_value=min_value, max_value=max_value)\n \n if self.sigma_sample_density_type == 'loguniform':\n min_value = sd_config['min_value'] if 'min_value' in sd_config else self.sigma_min\n max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max\n return partial(rand_log_uniform, 
min_value=min_value, max_value=max_value)\n if self.sigma_sample_density_type == 'uniform':\n return partial(rand_uniform, min_value=self.sigma_min, max_value=self.sigma_max)\n\n if self.sigma_sample_density_type == 'v-diffusion':\n min_value = self.min_value if 'min_value' in sd_config else self.sigma_min\n max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max\n return partial(rand_v_diffusion, sigma_data=self.sigma_data, min_value=min_value, max_value=max_value)\n if self.sigma_sample_density_type == 'discrete':\n sigmas = self.get_noise_schedule(self.n_sampling_steps, 'exponential')\n return partial(rand_discrete, values=sigmas)\n else:\n raise ValueError('Unknown sample density type')" }, { "identifier": "DataGenerator", "path": "ctm/toy_tasks/data_generator.py", "snippet": "class DataGenerator:\n def __init__(self, dist_type: str):\n self.dist_type = dist_type\n self.func_mapping = {\n \"two_gmm_1D\": (self.two_gmm_1D, self.two_gmm_1D_log_prob),\n \"uneven_two_gmm_1D\": (self.uneven_two_gmm_1D, self.uneven_two_gmm_1D_log_prob),\n \"three_gmm_1D\": (self.three_gmm_1D, self.three_gmm_1D_log_prob),\n \"single_gaussian_1D\": (self.single_gaussian_1D, self.single_gaussian_1D_log_prob),\n }\n if self.dist_type not in self.func_mapping:\n raise ValueError(\"Invalid distribution type\")\n self.sample_func, self.log_prob_func = self.func_mapping[self.dist_type]\n\n def generate_samples(self, num_samples: int):\n \"\"\"\n Generate `num_samples` samples and labels using the `sample_func`.\n \n Args:\n num_samples (int): Number of samples to generate.\n \n Returns:\n Tuple[np.ndarray, np.ndarray]: A tuple of two numpy arrays containing the generated samples and labels.\n \"\"\"\n samples, labels = self.sample_func(num_samples)\n return samples, labels\n \n def compute_log_prob(self, samples, exp: bool = False):\n \"\"\"\n Compute the logarithm of probability density function (pdf) of the given `samples`\n using the `log_prob_func`. If `exp` is True, return exponentiated log probability.\n \n Args:\n samples (np.ndarray): Samples for which pdf is to be computed.\n exp (bool, optional): If True, return exponentiated log probability.\n Default is False.\n \n Returns:\n np.ndarray: Logarithm of probability density function (pdf) of the given `samples`.\n If `exp` is True, exponentiated log probability is returned.\n \"\"\"\n return self.log_prob_func(samples, exp=exp)\n\n @staticmethod\n def two_gmm_1D(num_samples,):\n \"\"\"\n Generates `num_samples` samples from a 1D mixture of two Gaussians with equal weights.\n \n Args:\n num_samples (int): Number of samples to generate.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two torch tensors containing the generated\n samples and binary labels indicating which Gaussian component the sample is from.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.3)\n g2 = Normal(loc=1.5, scale=0.3)\n mixture_probs = torch.ones(num_samples) * 0.5\n is_from_g1 = torch.bernoulli(mixture_probs).bool()\n samples = torch.where(is_from_g1, g1.sample((num_samples,)), g2.sample((num_samples,)))\n return samples, is_from_g1.int()\n\n @staticmethod\n def uneven_two_gmm_1D(num_samples, w1=0.7):\n \"\"\"\n Generates `num_samples` samples from a 1D mixture of two Gaussians with weights `w1` and `w2`.\n \n Args:\n num_samples (int): Number of samples to generate.\n w1 (float, optional): Weight of first Gaussian component. 
Default is 0.7.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two torch tensors containing the generated\n samples and binary labels indicating which Gaussian component the sample is from.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.3)\n g2 = Normal(loc=1.5, scale=0.2)\n mixture_probs = torch.tensor([w1, 1-w1])\n is_from_g1 = torch.bernoulli(mixture_probs.repeat(num_samples, 1)).view(num_samples, -1).bool().squeeze()\n \n samples_g1 = g1.sample((num_samples, 1))\n samples_g2 = g2.sample((num_samples, 1))\n samples = torch.where(is_from_g1, samples_g1, samples_g2).squeeze()\n\n return samples, is_from_g1.int()\n \n @staticmethod\n def single_gaussian_1D(num_samples):\n \"\"\"\n Generates `num_samples` samples from a 1D Gaussian distribution.\n \n Args:\n num_samples (int): Number of samples to generate.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two torch tensors containing the generated\n samples and binary labels indicating which Gaussian component the sample is from.\n Since there is only one Gaussian component, all labels will be zero.\n \"\"\"\n g1 = Normal(loc=1, scale=0.2)\n samples = g1.sample((num_samples, 1))\n return samples, torch.zeros(num_samples).int()\n\n @staticmethod\n def three_gmm_1D(num_samples):\n \"\"\"\n Generates `num_samples` samples from a 1D mixture of three Gaussians with equal weights.\n \n Args:\n num_samples (int): Number of samples to generate.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two torch tensors containing the generated\n samples and integer labels indicating which Gaussian component the sample is from.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.2)\n g2 = Normal(loc=0, scale=0.2)\n g3 = Normal(loc=1.5, scale=0.2)\n mixture_probs = torch.ones(3) / 3\n component_assignments = torch.multinomial(mixture_probs, num_samples, replacement=True)\n samples = torch.zeros(num_samples, 1)\n \n g1_mask = (component_assignments == 0)\n g2_mask = (component_assignments == 1)\n g3_mask = (component_assignments == 2)\n \n samples[g1_mask] = g1.sample((g1_mask.sum(), )).view(-1, 1)\n samples[g2_mask] = g2.sample((g2_mask.sum(), )).view(-1, 1)\n samples[g3_mask] = g3.sample((g3_mask.sum(), )).view(-1, 1)\n \n return samples, component_assignments.int()\n\n @staticmethod\n def two_gmm_1D_log_prob(z, exp=False):\n \"\"\"\n Computes the logarithm of the probability density function (pdf) of a 1D mixture of two Gaussians\n with equal weights at the given points `z`.\n \n Args:\n z (torch.Tensor): Points at which to compute the pdf.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n torch.Tensor: Logarithm of probability density function (pdf) of a 1D mixture of two Gaussians\n with equal weights at the given points `z`. If `exp` is True, exponentiated log probability\n is returned.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.3)\n g2 = Normal(loc=1.5, scale=0.3)\n f = torch.log(0.5 * (g1.log_prob(z).exp() + g2.log_prob(z).exp()))\n if exp:\n return torch.exp(f)\n else:\n return f\n \n @staticmethod\n def uneven_two_gmm_1D_log_prob(z, w1=0.7, exp=False):\n \"\"\"\n Computes the logarithm of the probability density function (pdf) of a 1D mixture of two Gaussians\n with weights `w1` and `w2` at the given points `z`.\n \n Args:\n z (torch.Tensor): Points at which to compute the pdf.\n w1 (float, optional): Weight of first Gaussian component. 
Default is 0.7.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n torch.Tensor: Logarithm of probability density function (pdf) of a 1D mixture of two Gaussians\n with weights `w1` and `w2` at the given points `z`. If `exp` is True, exponentiated log probability\n is returned.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.3)\n g2 = Normal(loc=1.5, scale=0.2)\n f = torch.log(w1 * g1.log_prob(z).exp() + (1 - w1) * g2.log_prob(z).exp())\n if exp:\n return torch.exp(f)\n else:\n return f\n\n @staticmethod\n def three_gmm_1D_log_prob(z, exp=False):\n \"\"\"\n Computes the logarithm of the probability density function (pdf) of a 1D mixture of three Gaussians\n with equal weights at the given points `z`.\n \n Args:\n z (torch.Tensor): Points at which to compute the pdf.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n torch.Tensor: Logarithm of probability density function (pdf) of a 1D mixture of three Gaussians\n with equal weights at the given points `z`. If `exp` is True, exponentiated log probability\n is returned.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.2)\n g2 = Normal(loc=0, scale=0.2)\n g3 = Normal(loc=1.5, scale=0.2)\n f = torch.log(1/3 * (g1.log_prob(z).exp() + g2.log_prob(z).exp() + g3.log_prob(z).exp()))\n if exp:\n return torch.exp(f)\n else:\n return f\n\n @staticmethod\n def single_gaussian_1D_log_prob(z, exp=False):\n \"\"\"\n Computes the logarithm of the probability density function (pdf) of a 1D Gaussian\n distribution at the given points `z`.\n \n Args:\n z (torch.Tensor): Points at which to compute the pdf.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n torch.Tensor: Logarithm of probability density function (pdf) of a 1D Gaussian\n distribution at the given points `z`. If `exp` is True, exponentiated log probability\n is returned.\n \"\"\"\n g = Normal(loc=1, scale=0.2)\n f = g.log_prob(z)\n if exp:\n return torch.exp(f)\n else:\n return f" }, { "identifier": "plot_main_figure", "path": "ctm/visualization/vis_utils.py", "snippet": "def plot_main_figure(\n fn, \n model, \n n_samples, \n train_epochs, \n sampling_method='euler',\n x_range=[-4, 4], \n n_sampling_steps = 10,\n save_path='/home/moritz/code/cm_1D_Toy_Task/plots'\n): \n \"\"\"\n Plot the main figure for the given model and sampling method.\n Args:\n fn (callable): Target function to be plotted.\n model (object): Model to be used for sampling (ConsistencyModel or Beso).\n n_samples (int): Number of samples to be taken.\n train_epochs (int): Number of training epochs.\n sampling_method (str, optional): Method to be used for sampling ('multistep', 'onestep', or 'euler'). Defaults to False.\n x_range (list, optional): Range of x values to be plotted. Defaults to [-5, 5].\n n_sampling_steps (int, optional): Number of sampling steps. Defaults to 10.\n save_path (str, optional): Directory to save the plot. 
Defaults to '/home/moritz/code/cm_1D_Toy_Task/plots'.\n\n Raises ValueError: If the sampling_method is not one of the specified options ('multistep', 'onestep', or 'euler').\n \"\"\"\n test_samples = get_test_samples(model, n_samples, sampling_method, n_sampling_steps)\n test_samples = [x.detach().cpu().numpy() for x in test_samples]\n test_samples = np.stack(test_samples, axis=1)\n\n x_test = np.linspace(x_range[0], x_range[1], n_samples)\n target_fn = fn(torch.tensor(x_test), exp=True)\n\n fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(10, 10), sharex=True)\n ax1.set_xlim(*x_range)\n ax2.set_xlim(*x_range)\n ax3.set_xlim(*x_range)\n\n # Plot target distribution\n ax1.plot(x_test, target_fn, color='black', label='Target Distribution')\n\n # Plot predicted distribution\n kde = gaussian_kde(test_samples[:, -1, 0], bw_method=0.1)\n predicted_distribution = kde(x_test)\n ax1.plot(x_test, predicted_distribution, label='Predicted Distribution')\n\n # Create a LineCollection to show colors on the predicted distribution line\n points = np.array([x_test, predicted_distribution]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n lc = LineCollection(segments, cmap='viridis', norm=plt.Normalize(predicted_distribution.min(), predicted_distribution.max()))\n lc.set_array(predicted_distribution)\n lc.set_linewidth(2)\n\n ax1.add_collection(lc)\n stepsize = np.linspace(0, 1, model.n_sampling_steps)\n # stepsize = cm.get_noise_schedule(model.n_sampling_steps, noise_schedule_type='exponential').flip(0)\n # ax2.set_ylim(-0.1, 1.1)\n if sampling_method == 'onestep':\n n_sampling_steps = 1\n stepsize = np.linspace(0, 1, 2)\n ax2.quiver(test_samples[:, 0].reshape(-1),\n stepsize[0] * np.ones(n_samples),\n test_samples[:, 1].reshape(-1) - test_samples[:, 0].reshape(-1),\n stepsize[1] * np.ones(n_samples) - stepsize[0] * np.ones(n_samples),\n angles='xy', scale_units='xy', scale=1,\n width=0.001\n )\n else:\n n_sampling_steps = n_sampling_steps\n for i in range(1, n_sampling_steps):\n ax2.quiver(test_samples[:, i - 1].reshape(-1),\n stepsize[i - 1] * np.ones(n_samples),\n test_samples[:, i].reshape(-1) - test_samples[:, i-1].reshape(-1),\n stepsize[i] * np.ones(n_samples) - stepsize[i - 1] * np.ones(n_samples),\n angles='xy', scale_units='xy', scale=1,\n width=0.001\n )\n ax2.set_yticks([stepsize.min(), stepsize.max()])\n ax2.set_ylim(stepsize.min(), stepsize.max())\n \n mu = 0 # mean\n sigma = model.sigma_max # standard deviation\n\n # Compute the PDF values for x_test\n prob_samples = norm.pdf(x_test, loc=mu, scale=sigma)\n # Create a LineCollection to show colors on the normal distribution line\n points = np.array([x_test, prob_samples]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n lc = LineCollection(segments, cmap='viridis', norm=plt.Normalize(prob_samples.min(), prob_samples.max()))\n lc.set_array(prob_samples)\n lc.set_linewidth(2)\n\n ax3.add_collection(lc)\n ax3.set_ylim(0, 0.5)\n\n # ... (previous code remains unchanged)\n ax2.set_xticks([])\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax3.set_yticks([])\n ax2.set_yticklabels(['T', '0'])\n ax2.tick_params(axis='y', labelsize=16)\n # ax2.set_yticks('log')\n plt.subplots_adjust(hspace=0)\n plt.savefig(save_path + '/cm_' + sampling_method + f'_epochs_{train_epochs}.png', bbox_inches='tight', pad_inches=0.1) \n \n print('Plot saved!')" } ]
from tqdm import tqdm
from ctm.ctm import ConsistencyTrajectoryModel
from ctm.toy_tasks.data_generator import DataGenerator
from ctm.visualization.vis_utils import plot_main_figure
11,771
""" Discrete consistency distillation training of the consistency model on a toy task. We train a diffusion model and the consistency model at the same time and iteratively update the weights of the consistency model and the diffusion model. """ if __name__ == "__main__": device = 'cpu' n_sampling_steps = 10 use_pretraining = True cm = ConsistencyTrajectoryModel( data_dim=1, cond_dim=1, sampler_type='euler', lr=4e-4, sigma_data=0.5, sigma_min=0.05, solver_type='heun', sigma_max=2, n_discrete_t=18, conditioned=False, diffusion_lambda= 1, device=device, rho=7, ema_rate=0.999, use_teacher=use_pretraining, ) train_epochs = 2002 # chose one of the following toy tasks: 'three_gmm_1D' 'uneven_two_gmm_1D' 'two_gmm_1D' 'single_gaussian_1D' data_manager = DataGenerator('three_gmm_1D') samples, cond = data_manager.generate_samples(5000) samples = samples.reshape(-1, 1).to(device) pbar = tqdm(range(train_epochs)) # if not simultanous_training: # First pretrain the diffusion model and then train the consistency model if use_pretraining: for i in range(train_epochs): cond = cond.reshape(-1, 1).to(device) diff_loss = cm.diffusion_train_step(samples, cond, i, train_epochs) pbar.set_description(f"Step {i}, Diff Loss: {diff_loss:.8f}") pbar.update(1) cm.update_teacher_model()
""" Discrete consistency distillation training of the consistency model on a toy task. We train a diffusion model and the consistency model at the same time and iteratively update the weights of the consistency model and the diffusion model. """ if __name__ == "__main__": device = 'cpu' n_sampling_steps = 10 use_pretraining = True cm = ConsistencyTrajectoryModel( data_dim=1, cond_dim=1, sampler_type='euler', lr=4e-4, sigma_data=0.5, sigma_min=0.05, solver_type='heun', sigma_max=2, n_discrete_t=18, conditioned=False, diffusion_lambda= 1, device=device, rho=7, ema_rate=0.999, use_teacher=use_pretraining, ) train_epochs = 2002 # chose one of the following toy tasks: 'three_gmm_1D' 'uneven_two_gmm_1D' 'two_gmm_1D' 'single_gaussian_1D' data_manager = DataGenerator('three_gmm_1D') samples, cond = data_manager.generate_samples(5000) samples = samples.reshape(-1, 1).to(device) pbar = tqdm(range(train_epochs)) # if not simultanous_training: # First pretrain the diffusion model and then train the consistency model if use_pretraining: for i in range(train_epochs): cond = cond.reshape(-1, 1).to(device) diff_loss = cm.diffusion_train_step(samples, cond, i, train_epochs) pbar.set_description(f"Step {i}, Diff Loss: {diff_loss:.8f}") pbar.update(1) cm.update_teacher_model()
plot_main_figure(
2
2023-11-07 15:30:11+00:00
16k
awslabs/optimizing-multitask-training-through-dynamic-pipelines
dynapipe/pipe/data_loader.py
[ { "identifier": "ProfileBasedCostModelWithRC", "path": "dynapipe/data_opt/cost_models.py", "snippet": "class ProfileBasedCostModelWithRC(object):\n \"\"\"\n Wrapper class for multiple ProfileBasedCostModel objects, one for each\n tensor parallel degree and recomputation method.\n \"\"\"\n\n def __init__(\n self,\n profile_paths=None,\n _serialized_cms: Optional[Dict[Tuple[int, str], bytes]] = None,\n ) -> None:\n self.cost_models: dict[str, ProfileBasedCostModel] = {}\n if _serialized_cms is not None:\n for cm_key, serialized_cm in _serialized_cms.items():\n self.cost_models[cm_key] = ProfileBasedCostModel.deserialize(\n serialized_cm\n )\n return\n if not isinstance(profile_paths, list):\n # profile_paths is a dir\n assert os.path.isdir(profile_paths), (\n f\"Profile path {profile_paths} is not a directory \"\n \"or list of paths\"\n )\n profile_paths = [\n os.path.join(profile_paths, x)\n for x in os.listdir(profile_paths)\n if x.startswith(\"microbench\") and x.endswith(\"txt\")\n ]\n # separate paths by cost model key (tp_size, rc_type)\n self.per_key_profile_paths = defaultdict(list)\n for path in profile_paths:\n cm_key = self._parse_cm_key(path)\n self.per_key_profile_paths[cm_key].append(path)\n for cm_key, paths in self.per_key_profile_paths.items():\n self.cost_models[cm_key] = ProfileBasedCostModel(paths)\n\n def _parse_cm_key(self, filename):\n basename = os.path.basename(filename)\n if \"rc_full_uniform\" in basename:\n rc_type = \"full\"\n elif \"rc_selective\" in basename:\n rc_type = \"selective\"\n else:\n rc_type = \"none\"\n tp_size = int(basename.split(\"_\")[1][2:])\n return tp_size, rc_type\n\n def _check_valid_cm_key(self, cm_key):\n assert (\n cm_key in self.cost_models\n ), f\"Key {cm_key} not recorded in profile.\"\n\n def is_valid_stage(self, tp_size, rc_type, stage):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].is_valid_stage(stage)\n\n def valid_stages(self, tp_size, rc_type):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].valid_stages()\n\n def supported_sequence_lengths(self, tp_size, rc_type, stage):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].supported_sequence_lengths(\n stage\n )\n\n def get_cost(\n self,\n tp_size,\n rc_type,\n stage,\n seq_len,\n mbs,\n ):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the computation cost.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_cost(\n stage, seq_len, mbs\n )\n\n def get_stored_activation(self, tp_size, rc_type, stage, seq_len, mbs):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the stored activation.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_stored_activation(\n stage, seq_len, mbs\n )\n\n def get_peak_activation(self, tp_size, rc_type, stage, seq_len, mbs):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the peak activation.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_peak_activation(\n stage, seq_len, mbs\n )\n\n def get_model_state(\n self,\n tp_size,\n rc_type,\n stage,\n n_shards=1,\n zero_stage=0,\n param_factor=None,\n ):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the model state.\n 
\"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_model_state(\n stage,\n n_shards=n_shards,\n zero_stage=zero_stage,\n param_factor=param_factor,\n )\n\n def get_raw_cost_model(self, tp_size, rc_type):\n \"\"\"Get the raw cost model for the given TP degree and recomputation\n type.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)]\n\n def save(self, path):\n serialized_dict = {}\n for cm_key, cost_model in self.cost_models.items():\n serialized_dict[cm_key] = cost_model.serialize()\n with open(path, \"wb\") as f:\n pickle.dump(serialized_dict, f)\n\n @classmethod\n def load(cls, path):\n with open(path, \"rb\") as f:\n serialized_dict = pickle.load(f)\n return cls(_serialized_cms=serialized_dict)" }, { "identifier": "DataAssignmentOptimizer", "path": "dynapipe/data_opt/optimizer.py", "snippet": "class DataAssignmentOptimizer(object):\n \"\"\"Data assignment optimizer.\n\n Optimizes the assignment of a mini-batch of data into micro-batches.\n \"\"\"\n\n def __init__(\n self,\n cost_model: ProfileBasedCostModelWithRC,\n model_spec: TransformerModelSpec,\n n_executors: int,\n n_layers_per_stage: int,\n n_chunks_per_device: int = 1,\n dp_size: int = 1,\n tp_size: int = 1,\n zero_stage: int = 0,\n device_memory_limit: float = float(\"inf\"),\n round_seqlen_multiple=8,\n per_mb_memory_fraction=None,\n len_pack_sep_tokens=1,\n len_decoder_additional_tokens=2,\n seqlen_offset=0,\n ):\n \"\"\"Optimizer for assigning data samples into micro-batches.\n cost_model: cost model for the model used\n model_spec: model specification\n n_executors: number of stages of the pipelined model\n n_layers_per_stage: number of layers per each pipeline stage\n n_chunks_per_device: number of chunks per device\n (> 1 indicating interleaved schedule)\n dp_size: data parallelism degree\n tp_size: tensor parallelism degree\n zero_stage: stage of ZeRO optimizer\n device_memory_limit: memory limit in MB (MegaBytes)\n round_seqlen_multiple: always round sequence length to multiple of\n this number, required for some kernels\n default: 8\n len_pack_sep_tokens: number of tokens used to separate samples in the\n packed sequence, only used when enable_packing\n is True during optimization.\n len_decoder_additional_tokens: number of additional tokens added to\n the decoder sequence length other than\n the target sequence, e.g. 
<bos>, <eos>\n seqlen_offset: should be set 1 for decoder only models, whose input\n and target sequences are data sequence length - 1\n 0 for encoder-decoder models.\n \"\"\"\n self.cost_model = cost_model\n self.n_executors = n_executors\n self.n_layers_per_stage = n_layers_per_stage\n # create memory model\n self.model_spec = model_spec\n self.memory_limit = device_memory_limit\n self.dp_size = dp_size\n self.tp_size = tp_size\n self.zero_stage = zero_stage\n self.round_seqlen_multiple = round_seqlen_multiple\n self.len_pack_sep_tokens = len_pack_sep_tokens\n self.len_decoder_additional_tokens = len_decoder_additional_tokens\n self.n_chunks_per_device = n_chunks_per_device\n self.per_mb_memory_fraction = per_mb_memory_fraction\n self.seqlen_offset = seqlen_offset\n\n def _round_seqlen(self, seqlen, decoder=False):\n if decoder:\n seqlen += self.len_decoder_additional_tokens\n seqlen -= self.seqlen_offset\n return (\n (seqlen + self.round_seqlen_multiple - 1)\n // self.round_seqlen_multiple\n * self.round_seqlen_multiple\n + self.seqlen_offset\n )\n\n def _solve_sample_order_tsp_problem(\n self,\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n bottleneck_tsp=True,\n dist_function=\"sum\",\n use_clustering=True,\n distance_threshold=16,\n ):\n \"\"\"Solve the TSP problem to determine the sample order.\"\"\"\n if dist_function == \"sum\":\n\n def _f_dist(x, y):\n return abs(int(x[0]) - int(y[0])) + abs(int(x[1]) - int(y[1]))\n\n elif dist_function == \"max\":\n\n def _f_dist(x, y):\n return max(\n abs(int(x[0]) - int(y[0])), abs(int(x[1]) - int(y[1]))\n )\n\n elif dist_function == \"square\":\n\n def _f_dist(x, y):\n return (int(x[0]) - int(y[0])) ** 2 + (\n int(x[1]) - int(y[1])\n ) ** 2\n\n else:\n raise ValueError(\n \"Unknown distance function: {}\".format(dist_function)\n )\n\n def _get_distance_matrix(points):\n # add a dummy point at the beginning\n # to transform it into an open TSP problem\n distance_matrix = [[0] * (len(points) + 1)]\n for x in points:\n row = [0]\n for y in points:\n row.append(_f_dist(x, y))\n distance_matrix.append(row)\n return distance_matrix\n\n input_points = list(\n zip(sample_sequence_lengths, decoder_sample_sequence_lengths)\n )\n if use_clustering:\n vectors_np = np.array(input_points)\n clustering = AgglomerativeClustering(\n n_clusters=None,\n distance_threshold=distance_threshold,\n linkage=\"complete\",\n ).fit(vectors_np)\n labels = clustering.labels_\n n_clusters = max(labels) + 1\n cluster_to_samples = [[] for _ in range(n_clusters)]\n cluster_to_data = [[] for _ in range(n_clusters)]\n for sample_idx, label in enumerate(labels):\n cluster_to_samples[label].append(sample_idx)\n cluster_to_data[label].append(input_points[sample_idx])\n # compute cluster centroids\n cluster_to_center = [None] * n_clusters\n for cluster_label, data in enumerate(cluster_to_data):\n cluster_to_center[cluster_label] = tuple(np.mean(data, axis=0))\n # compute tsp for cluster centroids\n distance_matrix = np.array(_get_distance_matrix(cluster_to_center))\n permutation = list(\n np.array(\n elkai.solve_int_matrix(\n distance_matrix, 1, bottleneck=bottleneck_tsp\n )\n )\n - 1\n )[1:]\n # reconstruct orig order\n result = []\n for cluster_label in permutation:\n result += cluster_to_samples[cluster_label]\n # sanity check result is a valid permutation\n assert sorted(result) == list(range(len(result)))\n return result\n\n distance_matrix = np.array(_get_distance_matrix(input_points))\n permutation = list(\n np.array(\n elkai.solve_int_matrix(\n 
distance_matrix, 1, bottleneck=bottleneck_tsp\n )\n )\n - 1\n )[1:]\n return permutation\n\n def _pack(\n self,\n sequence: list,\n current_enc_length,\n current_dec_length,\n target_enc_length,\n target_dec_length,\n next_idx,\n samples_with_ids,\n consumed,\n ):\n for j in range(next_idx, len(samples_with_ids)):\n if consumed[j]:\n continue\n (\n seqlen_to_pack,\n dec_seqlen_to_pack,\n sample_id_to_pack,\n ) = samples_with_ids[j]\n if (\n current_enc_length + seqlen_to_pack <= target_enc_length\n and current_dec_length + dec_seqlen_to_pack\n <= target_dec_length\n ):\n sequence.append(sample_id_to_pack)\n current_enc_length += seqlen_to_pack\n current_dec_length += dec_seqlen_to_pack\n consumed[j] = True\n return current_enc_length, current_dec_length\n\n def _uniform_partition(self, samples_with_ids, microbatch_size):\n max_sequence_length = max([x[0] for x in samples_with_ids])\n max_decoder_sequence_length = max([x[1] for x in samples_with_ids])\n\n # round sequence length to multiple of round_seqlen_multiple\n max_sequence_length = self._round_seqlen(max_sequence_length)\n max_decoder_sequence_length = self._round_seqlen(\n max_decoder_sequence_length, decoder=True\n )\n # pack all sequences into fixed sequence length\n target_src_seqlen = max_sequence_length\n target_tgt_seqlen = (\n max_decoder_sequence_length - self.len_decoder_additional_tokens\n )\n consumed = [False] * len(samples_with_ids)\n sequences = []\n for seqlen, dec_seqlen, idx in samples_with_ids:\n if consumed[idx]:\n continue\n curr_sequence = []\n curr_sequence_seqlen = seqlen\n curr_sequence_dec_seqlen = dec_seqlen\n curr_sequence.append(idx)\n curr_sequence_seqlen, curr_sequence_dec_seqlen = self._pack(\n curr_sequence,\n curr_sequence_seqlen,\n curr_sequence_dec_seqlen,\n target_src_seqlen,\n target_tgt_seqlen,\n idx + 1,\n samples_with_ids,\n consumed,\n )\n sequences.append(curr_sequence)\n consumed[idx] = True\n # divide sequences into microbatches\n microbatches = []\n for i in range(0, len(sequences), microbatch_size):\n microbatches.append(sequences[i : i + microbatch_size])\n return microbatches\n\n def _token_based_partition(self, samples_with_ids, microbatch_tokens):\n microbatches = []\n current_microbatch_tokens = 0\n current_microbatch = []\n for seqlen, dec_seqlen, idx in samples_with_ids:\n rounded_seqlen = self._round_seqlen(seqlen)\n rounded_dec_seqlen = self._round_seqlen(dec_seqlen, decoder=True)\n if (\n current_microbatch_tokens + rounded_seqlen + rounded_dec_seqlen\n > microbatch_tokens\n ):\n if len(current_microbatch) > 0:\n microbatches.append(current_microbatch.copy())\n current_microbatch = []\n current_microbatch_tokens = 0\n current_microbatch.append([idx])\n current_microbatch_tokens += seqlen + dec_seqlen\n if len(current_microbatch) > 0:\n microbatches.append(current_microbatch)\n return microbatches\n\n def _subset_partition(self, micro_batch_costs):\n # partition the microbatches into subsets\n # create a mapping from microbatch index to its cost\n mb_cost_map = {}\n for i, mb in enumerate(micro_batch_costs):\n mb_cost_map[i] = mb\n return prtpy.partition(\n algorithm=prtpy.partitioning.kk,\n numbins=self.dp_size,\n items=mb_cost_map,\n )\n\n def generate_microbatches(\n self,\n sample_sequence_lengths,\n available_rc_types=None,\n decoder_sample_sequence_lengths=None,\n disable_tsp=False,\n bottleneck_tsp=False,\n tsp_dist_function=\"sum\",\n tsp_use_clustering=True,\n tsp_cluster_distance_threshold=16,\n partition_method=\"dp\",\n uniform_partition_batch_size=None,\n 
token_based_partition_mb_tokens=None,\n enable_packing=False,\n ):\n if available_rc_types is None:\n available_rc_types = [\"none\", \"selective\", \"full\"]\n if (\n self.n_chunks_per_device > 1\n and decoder_sample_sequence_lengths is None\n ):\n raise ValueError(\n \"Interleaved schedule with non-encoder-decoder models \"\n \"are not supported yet.\"\n )\n # stage 1: determine the order of samples\n if decoder_sample_sequence_lengths is None:\n samples_with_ids = [\n (seqlen, 0, i)\n for i, seqlen in enumerate(sample_sequence_lengths)\n ]\n # single sequence, sorting suffices\n samples_with_ids.sort(reverse=True)\n else:\n if partition_method == \"uniform\":\n assert uniform_partition_batch_size is not None, (\n \"uniform_partition_batch_size must be specified \"\n \"when partition_method is 'uniform'\"\n )\n # uniform partitioning, don't need to solve TSP\n samples_with_ids = [\n (seqlen, dec_seqlen, i)\n for i, (seqlen, dec_seqlen) in enumerate(\n zip(\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n )\n )\n ]\n else:\n # multiple sequences, use TSP or 2 level sorting\n # to find the optimal order\n if disable_tsp:\n samples_with_ids = [\n (seqlen, dec_seqlen, i)\n for i, (seqlen, dec_seqlen) in enumerate(\n zip(\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n )\n )\n ]\n # sort first by encoder sequence length, then by decoder\n samples_with_ids.sort(reverse=True)\n else:\n permutation = self._solve_sample_order_tsp_problem(\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n bottleneck_tsp=bottleneck_tsp,\n dist_function=tsp_dist_function,\n use_clustering=tsp_use_clustering,\n distance_threshold=tsp_cluster_distance_threshold,\n )\n samples_with_ids = [\n (\n sample_sequence_lengths[i],\n decoder_sample_sequence_lengths[i],\n int(i),\n )\n for i in permutation\n ]\n # stage 2: splitting and packing\n # we first calculate the model states memory and subtract it\n # from the memory limit\n # We assume that GPU0 is the bottleneck GPU, which holds Embedding\n # and Encoder of the model if not interleaved, and holds Embedding,\n # Encoder and Decoder of the model if interleaved.\n # rc_type doesn't matter here\n model_states_memory = self.cost_model.get_model_state(\n self.tp_size,\n \"none\",\n \"Embedding\",\n n_shards=self.dp_size,\n zero_stage=self.zero_stage,\n )\n encoder_model_state = self.cost_model.get_model_state(\n self.tp_size,\n \"none\",\n \"Encoder\",\n n_shards=self.dp_size,\n zero_stage=self.zero_stage,\n )\n if decoder_sample_sequence_lengths is not None:\n decoder_model_state = self.cost_model.get_model_state(\n self.tp_size,\n \"none\",\n \"Decoder\",\n n_shards=self.dp_size,\n zero_stage=self.zero_stage,\n )\n else:\n decoder_model_state = 0\n if self.n_chunks_per_device == 1:\n # not interleaved\n layer_states = max(encoder_model_state, decoder_model_state)\n else:\n # interleaved\n layer_states = encoder_model_state + decoder_model_state\n layer_states = layer_states * self.n_chunks_per_device / 2\n layer_states *= self.n_layers_per_stage\n model_states_memory += layer_states\n available_memory = self.memory_limit - model_states_memory\n\n if (\n self.per_mb_memory_fraction is not None\n and self.per_mb_memory_fraction > 0\n ):\n preferred_memory_limit = (\n self.per_mb_memory_fraction * available_memory\n )\n else:\n preferred_memory_limit = available_memory / self.n_executors\n for memory_type, memory_limit in [\n (\"preferred\", preferred_memory_limit),\n (\"available\", available_memory),\n ]:\n # first try to find a 
partition that do not need special schedule\n # if not found, only make sure that each single microbatch\n # fits in memory\n for rc_type in available_rc_types:\n if partition_method == \"dp\":\n # use dynamic programming to find optimal\n # sequential partition\n (\n objective_value,\n microbatches,\n microbatch_costs,\n ) = cpp_consecutive_partition_dp(\n self.cost_model.get_raw_cost_model(\n self.tp_size, rc_type\n ),\n self.n_executors,\n self.n_chunks_per_device,\n self.n_layers_per_stage,\n self.dp_size,\n memory_limit,\n available_memory,\n samples_with_ids,\n enable_packing=enable_packing,\n round_seqlen_multiple=self.round_seqlen_multiple,\n len_pack_sep_tokens=self.len_pack_sep_tokens,\n len_decoder_additional_tokens=self.len_decoder_additional_tokens, # noqa\n )\n elif partition_method == \"token_based\":\n assert token_based_partition_mb_tokens is not None, (\n \"token_based_partition_mb_tokens must be specified \"\n \"when partition_method is 'token_based'\"\n )\n # token based partitioning\n microbatches = self._token_based_partition(\n samples_with_ids, token_based_partition_mb_tokens\n )\n # dummy objective value, not used\n objective_value = (\n 0,\n 0,\n 0,\n [0] * len(microbatches),\n [0] * len(microbatches),\n )\n # dummy microbatch costs\n microbatch_costs = [0] * len(microbatches)\n elif partition_method == \"uniform\":\n microbatches = self._uniform_partition(\n samples_with_ids, uniform_partition_batch_size\n )\n # dummy objective value, not used\n objective_value = (\n 0,\n 0,\n 0,\n [0] * len(microbatches),\n [0] * len(microbatches),\n )\n # dummy microbatch costs\n microbatch_costs = [0] * len(microbatches)\n else:\n raise ValueError(\n \"unknown partition method: {}\".format(partition_method)\n )\n if math.isinf(objective_value[0]) or math.isnan(\n objective_value[0]\n ):\n # memory limit is too small\n continue\n # sanity check microbatches:\n # make sure that each index appears once and only once\n all_indices = set()\n for mb in microbatches:\n for sample in mb:\n for index in sample:\n assert (\n index not in all_indices\n ), \"index {} appears more than once\".format(index)\n all_indices.add(index)\n assert sorted(list(all_indices)) == list(\n range(len(samples_with_ids))\n ), (\n \"not all indices appear in microbatches: \"\n \"{} v.s. {}. 
Input seqlens: {}, target seqlens: {}\".format(\n len(all_indices),\n len(samples_with_ids),\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n )\n )\n # partition microbatches into subsets, each for one data\n # parallel group\n if self.dp_size > 1:\n partitioned_microbatch_ids = self._subset_partition(\n microbatch_costs\n )\n partitioned_microbatches = []\n for mb_ids in partitioned_microbatch_ids:\n partitioned_microbatches.append(\n [microbatches[i] for i in sorted(mb_ids)]\n )\n else:\n partitioned_microbatches = [microbatches]\n return (\n objective_value,\n partitioned_microbatches,\n memory_type,\n rc_type,\n (available_memory, model_states_memory, memory_limit),\n )\n # no feasible microbatch split found\n return None, None, None, None, None" }, { "identifier": "DynaPipeCluster", "path": "dynapipe/model.py", "snippet": "class DynaPipeCluster:\n def __init__(\n self,\n device2node: Dict[int, int],\n memory_limits: List[int],\n intra_node_bw_gbps: float,\n inter_node_bw_gbps: float,\n intra_node_lat_us: float,\n inter_node_lat_us: float,\n ) -> None:\n # memory_limits is in MB (megabytes)\n # bw is in Gbps (gigabits per second)\n # lat is in us (microseconds)\n devices = set()\n nodes = set()\n for device, node in device2node.items():\n devices.add(device)\n nodes.add(node)\n self.n_devices = len(devices)\n self.n_nodes = len(nodes)\n self.device2node = device2node\n flattened_devices = [device for device in device2node.keys()]\n assert list(sorted(list(set(flattened_devices)))) == list(\n range(self.n_devices)\n ), \"Device ids must be contiguous and start at 0\"\n assert len(memory_limits) == self.n_devices, (\n \"Expected memory limits for each of the \"\n f\"{self.n_devices} devices, but got \"\n f\"{len(memory_limits)} numbers.\"\n )\n self.memory_limits = memory_limits\n self.intra_node_bw = intra_node_bw_gbps\n self.inter_node_bw = inter_node_bw_gbps\n self.intra_node_lat = intra_node_lat_us\n self.inter_node_lat = inter_node_lat_us\n\n def _get_bw(self, dev0, dev1):\n if self.device2node[dev0] == self.device2node[dev1]:\n return self.intra_node_bw\n else:\n return self.inter_node_bw\n\n def _get_lat(self, dev0, dev1):\n if self.device2node[dev0] == self.device2node[dev1]:\n return self.intra_node_lat\n else:\n return self.inter_node_lat\n\n def get_comm_time(self, megabytes, dev0, dev1):\n if dev0 == dev1:\n return 0\n return self._get_lat(dev0, dev1) + 1e6 * (\n megabytes * 8 / 1e3\n ) / self._get_bw(dev0, dev1)\n\n def get_memory_limit(self, dev):\n return self.memory_limits[dev]\n\n def to_json(self) -> dict:\n return {\n \"n_devices\": self.n_devices,\n \"n_nodes\": self.n_nodes,\n \"device2node\": self.device2node,\n \"memory_limits\": self.memory_limits,\n \"intra_node_bw\": self.intra_node_bw,\n \"inter_node_bw\": self.inter_node_bw,\n \"intra_node_lat\": self.intra_node_lat,\n \"inter_node_lat\": self.inter_node_lat,\n }\n\n def dumps(self) -> str:\n return json.dumps(self.to_json())\n\n @staticmethod\n def loads(json_str: str) -> \"DynaPipeCluster\":\n return DynaPipeCluster.from_json(json.loads(json_str))\n\n @staticmethod\n def from_json(json_dict):\n converted_device2node = {\n int(k): int(v) for k, v in json_dict[\"device2node\"].items()\n }\n json_dict[\"device2node\"] = converted_device2node\n cluster = DynaPipeCluster(\n json_dict[\"device2node\"],\n json_dict[\"memory_limits\"],\n json_dict[\"intra_node_bw\"],\n json_dict[\"inter_node_bw\"],\n json_dict[\"intra_node_lat\"],\n json_dict[\"inter_node_lat\"],\n )\n return cluster" }, { 
"identifier": "TransformerModelSpec", "path": "dynapipe/model.py", "snippet": "class TransformerModelSpec:\n # Default setting:\n # * mlp_hidden_size = 4x hidden_dim\n # * kv_channels = hidden_dim // num_attn_heads\n # * use FP16 mixed precision training with Adam optimizer.\n n_encoder_layers: int\n n_decoder_layers: int\n hidden_dim: int\n num_attn_heads: int\n mlp_hidden_dim: Union[None, int] = None\n kv_channels: Union[None, int] = None\n bytes_per_element: int = 2\n optimizer_state_multiplier: int = 12\n\n def __post_init__(self):\n if self.mlp_hidden_dim is None:\n # if not specified, use the 4x hidden dim as it is the norm\n self.mlp_hidden_dim = self.hidden_dim * 4\n if self.kv_channels is None:\n # if not specified, use the hidden_dim // num_attn_heads\n assert self.hidden_dim % self.num_attn_heads == 0\n self.kv_channels = self.hidden_dim // self.num_attn_heads\n\n def serialize(self) -> bytes:\n def _serialize_int(x: int):\n return x.to_bytes(4, \"little\")\n\n return b\"\".join(\n [\n _serialize_int(x)\n for x in [\n self.n_encoder_layers,\n self.n_decoder_layers,\n self.hidden_dim,\n self.num_attn_heads,\n self.mlp_hidden_dim,\n self.kv_channels,\n self.bytes_per_element,\n self.optimizer_state_multiplier,\n ]\n ]\n )\n\n @classmethod\n def deserialize(cls, data: bytes):\n def _deserialize_int(data: bytes):\n return int.from_bytes(data, \"little\")\n\n return cls(\n *[_deserialize_int(data[i * 4 : (i + 1) * 4]) for i in range(8)]\n )" }, { "identifier": "deserialize_list_of_eps", "path": "dynapipe/pipe/instructions.py", "snippet": "def deserialize_list_of_eps(\n bytes: bytes, config=SerializationConfig(), deserialize_inner=True\n) -> Tuple[List[Union[ExecutionPlan, bytes]]]:\n \"\"\"Deserialize a list of execution plans from a byte array.\"\"\"\n n_eps = int.from_bytes(\n bytes[: config.EXECUTION_PLAN_META_BYTES],\n config.BYTES_ENDIANNESS,\n )\n bytes = bytes[config.EXECUTION_PLAN_META_BYTES :]\n eps = []\n for _ in range(n_eps):\n ep_bytes_len = int.from_bytes(\n bytes[: config.SERIALIZED_SIZE_BYTES],\n config.BYTES_ENDIANNESS,\n )\n bytes = bytes[config.SERIALIZED_SIZE_BYTES :]\n ep_bytes = bytes[:ep_bytes_len]\n if deserialize_inner:\n ep = ExecutionPlan.deserialize(ep_bytes, config=config)\n eps.append(ep)\n else:\n eps.append(ep_bytes)\n bytes = bytes[ep_bytes_len:]\n assert len(bytes) == 0\n return eps" }, { "identifier": "serialize_list_of_eps", "path": "dynapipe/pipe/instructions.py", "snippet": "def serialize_list_of_eps(\n eps: List[ExecutionPlan], config=SerializationConfig()\n) -> bytes:\n \"\"\"Serialize a list of execution plans to a byte array.\"\"\"\n result = len(eps).to_bytes(\n config.EXECUTION_PLAN_META_BYTES, config.BYTES_ENDIANNESS\n )\n for ep in eps:\n ep_bytes = ep.serialize(config)\n ep_bytes_len = len(ep_bytes).to_bytes(\n config.SERIALIZED_SIZE_BYTES, config.BYTES_ENDIANNESS\n )\n result += ep_bytes_len + ep_bytes\n\n return result" }, { "identifier": "ExecutionPlanner", "path": "dynapipe/schedule_opt/execution_planner.py", "snippet": "class ExecutionPlanner:\n def __init__(\n self,\n cluster_spec: DynaPipeCluster,\n model_spec: TransformerModelSpec,\n device_assignment: List[int],\n device_memory_limit: int,\n cost_model: ProfileBasedCostModelWithRC,\n dp_size: int = 1,\n tp_size: int = 1,\n zero_stage: int = 0,\n logger: Optional[logging.Logger] = None,\n ) -> None:\n self.cluster_spec = cluster_spec\n self.model_spec = model_spec\n self.cost_model = cost_model\n self.device_assignment = device_assignment\n self.n_devices = max(device_assignment) 
+ 1\n self.device_memory_limit = device_memory_limit\n self.dp_size = dp_size\n self.tp_size = tp_size\n self.zero_stage = zero_stage\n self.logger = logger\n (\n self.device_assignment_type,\n self.valid_schedule_methods,\n self.n_layers_per_stage,\n self.n_chunks_per_device,\n ) = validate_device_assignment(\n model_spec, cluster_spec, self.device_assignment\n )\n\n def _create_candidates(\n self,\n batch: List[Tuple[int, int, int]],\n schedule_method=\"dynamic\",\n rc_type=None,\n ):\n if rc_type is not None:\n if not isinstance(rc_type, list):\n available_rc_types = [rc_type]\n else:\n available_rc_types = rc_type\n else:\n available_rc_types = [\"none\", \"selective\", \"full\"]\n if schedule_method == \"dynamic\":\n sch_methods = self.valid_schedule_methods\n spec_args = []\n for rc_type in available_rc_types:\n for sch in sch_methods:\n spec_args.append((sch, rc_type))\n else:\n if schedule_method not in self.valid_schedule_methods:\n raise ValueError(\n \"Invalid schedule scheme: \"\n \"{} for device assignment: {}\".format(\n schedule_method, self.device_assignment\n )\n )\n spec_args = [\n (schedule_method, rc_type) for rc_type in available_rc_types\n ]\n candidates = []\n for schedule_method, rc_type in spec_args:\n minibatch_spec = construct_minibatch_spec(\n self.model_spec,\n self.cost_model,\n batch,\n rc_type,\n dp_size=self.dp_size,\n tp_size=self.tp_size,\n zero_stage=self.zero_stage,\n )\n if minibatch_spec is not None:\n candidates.append((schedule_method, rc_type, minibatch_spec))\n return candidates\n\n def _optimize_instructions(\n self,\n instructions: List[List[PipeInstruction]],\n n_stages: int,\n ):\n # instructions: instructions for each executor\n # Necessary steps to ensure correctness:\n # 1. Add CommunicationFinishInsturctions at appropriate places\n # 2. Allocate buffer slots (not buffer themselves)\n # Potential optimizations:\n # 1. Merge consecutive communication instructions (trade-off)\n # 2. Reschedule communication instructions\n # 3. 
Pre-allocate buffers to reduce memory fragmentation\n instrs, n_buffers = InstructionOptimizer(\n instructions, n_stages\n ).optimize()\n return instrs, n_buffers\n\n def generate_execution_plan(\n self,\n batch: List[Tuple[int, int, int]],\n limit_rc_type=None,\n schedule_method=\"dynamic\",\n disable_permute_microbatches=False,\n disable_scheduler_memory_limit=False,\n current_batch_idx=None,\n ):\n candidates = self._create_candidates(\n batch, schedule_method=schedule_method, rc_type=limit_rc_type\n )\n best_instrs = None\n best_sch = None\n best_rc = None\n best_cost = None\n best_stats = None\n for schedule_method, rc_type, minibatch_spec in candidates:\n (\n max_makespan,\n _,\n _,\n min_makespan,\n min_stats,\n min_instructions,\n ) = optimize_schedule(\n schedule_method,\n minibatch_spec,\n self.cluster_spec,\n self.device_assignment,\n try_permutations=not disable_permute_microbatches,\n include_memory_stats=True,\n progress_bar=False,\n memory_limit=self.device_memory_limit,\n disable_scheduler_memory_limit=disable_scheduler_memory_limit,\n raise_on_oom=False,\n rc_type=rc_type,\n logger=self.logger,\n )\n if max_makespan < 1e-5:\n # no feasible schedule\n if self.logger:\n self.logger.debug(\n \"No feasible schedule for batch {} \"\n \"using {} and recompute {}\".format(\n current_batch_idx, schedule_method, rc_type\n )\n )\n continue\n if best_cost is None or min_makespan < best_cost:\n best_cost = min_makespan\n best_sch = schedule_method\n best_rc = rc_type\n best_instrs = min_instructions\n best_stats = min_stats\n if best_instrs is None:\n raise RuntimeError(\n \"No feasible schedule for batch {}.\".format(current_batch_idx)\n )\n # get total number of stages\n best_instrs: List[List[PipeInstruction]]\n n_stages = (\n max([instr.stage for instrs in best_instrs for instr in instrs])\n + 1\n )\n assigned_stages_per_executor = []\n for instrs in best_instrs:\n assigned_stages = set()\n for instr in instrs:\n assigned_stages.add(instr.stage)\n assigned_stages = sorted(list(assigned_stages))\n assigned_stages_per_executor.append(assigned_stages)\n # construct execution plan\n if best_cost is None:\n # no feasible schedule\n return None, None, None, None, None\n assert len(best_instrs) == self.n_devices\n # run necessary optimization pass on instructions\n optimized_instrs, n_buffers = self._optimize_instructions(\n best_instrs, n_stages\n )\n execution_plans = [\n ExecutionPlan(\n instr,\n len(batch),\n self.n_devices,\n n_stages,\n i,\n assigned_stages_per_executor[i],\n name_to_recompute_method(best_rc),\n n_buffer,\n )\n for i, (instr, n_buffer) in enumerate(\n zip(optimized_instrs, n_buffers)\n )\n ]\n return execution_plans, best_cost, best_stats, best_rc, best_sch" }, { "identifier": "create_logger", "path": "dynapipe/utils/logger.py", "snippet": "class DynaPipeFormatter(logging.Formatter):\nclass LoggerWriter(object):\n def __init__(self, prefix=None, distributed_rank=None, colored=True):\n def _get_fmt_colored(self, level):\n def _get_fmt(self):\n def format(self, record):\n def __init__(self, writers):\n def write(self, message: str):\n def flush(self):\ndef create_logger(\n name=None,\n prefix=None,\n level=_default_logging_level,\n distributed_rank=None,\n log_file=None,\n):" }, { "identifier": "RedisKVStore", "path": "dynapipe/pipe/kv_redis.py", "snippet": "class RedisKVStore(object):\n # a blocking redis client\n def __init__(self, host, port, is_master=False):\n self.is_master = is_master\n self.host = host\n self.port = port\n if self.is_master:\n self.server = 
self._run_redis_server()\n # wait for redis server to start\n t = time.time()\n while True:\n try:\n self.client = redis.Redis(host=host, port=port, db=0)\n self.client.ping()\n break\n except redis.exceptions.ConnectionError:\n time.sleep(KVREDIS_POLLING_INTERVAL)\n if time.time() - t > KVREDIS_CONNECT_TIMEOUT:\n raise RuntimeError(\n \"WARNING: Cannot connect to KV Server. \"\n \"Is DYNAPIPE_KV_HOST and \"\n \"DYNAPIPE_KV_PORT set correctly?\"\n )\n continue\n # register cleanup\n atexit.register(self.__del__)\n\n def __del__(self):\n if self.is_master:\n if self.server.poll() is not None:\n return\n self.server.send_signal(subprocess.signal.SIGINT)\n self.server.wait()\n\n def _run_redis_server(self):\n # run a redis server\n p = subprocess.Popen(\n [\n REDIS_CMD,\n \"--save\",\n \"\",\n \"--port\",\n str(self.port),\n \"--bind\",\n str(self.host),\n ],\n shell=False,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT,\n )\n return p\n\n def wait(self, keys, timeout=None):\n # wait for a key to be set\n time_start = datetime.datetime.now()\n if not isinstance(keys, (list, tuple)):\n keys = [keys]\n while True:\n if self.client.exists(*keys):\n break\n if (\n timeout is not None\n and datetime.datetime.now() - time_start > timeout\n ):\n # match torch kvstore behavior\n raise RuntimeError(\"Timeout\")\n time.sleep(KVREDIS_POLLING_INTERVAL)\n\n def get(self, key, wait=True):\n if wait:\n self.wait(key)\n return self.client.get(key)\n\n def set(self, key, value: str, logger=None):\n # match torch kvstore behavior\n value_bytes = value.encode()\n self.client.set(key, value_bytes)\n if logger:\n logger.debug(\"KVStore: set {} to {}\".format(key, value))\n\n def add(self, key, value: int):\n # match torch kvstore behavior\n return self.client.incr(key, value)\n\n def delete_key(self, key):\n return self.client.delete(key)" }, { "identifier": "validate_device_assignment", "path": "dynapipe/pipe/utils.py", "snippet": "def validate_device_assignment(\n model_spec: TransformerModelSpec,\n cluster_spec: DynaPipeCluster,\n device_assignment: List[int],\n):\n \"\"\"\n Validate device assignment and detect device assignment type.\n Args:\n device_assignment: List of device ids for each layer.\n \"\"\"\n appeared_devices = set()\n for device in device_assignment:\n if device not in appeared_devices:\n # new device\n assert device == len(appeared_devices), (\n \"Devices must appear in indexed order. \"\n \"e.g. 
[0, 1, 2, 3] is valid, \"\n \"[0, 1, 3, 2] is not valid.\"\n )\n appeared_devices.add(device)\n n_devices = len(appeared_devices)\n assert n_devices == cluster_spec.n_devices, (\n \"Number of devices used in device assignment \"\n \"must be equal to number of devices in cluster spec.\"\n )\n virtual_layer_to_actual_layers = [[]]\n virtual_layer_devices = [0]\n last_device = 0\n for device in device_assignment:\n if device == last_device:\n virtual_layer_to_actual_layers[-1].append(device)\n else:\n virtual_layer_to_actual_layers.append([device])\n virtual_layer_devices.append(device)\n last_device = device\n n_actual_layers_per_virtual_layer = len(virtual_layer_to_actual_layers[0])\n for virtual_layer in virtual_layer_to_actual_layers:\n n_encoder_layers_in_virtual_layer = len(\n [\n layer\n for layer in virtual_layer\n if layer < model_spec.n_encoder_layers\n ]\n )\n n_decoder_layers_in_virtual_layer = (\n len(virtual_layer) - n_encoder_layers_in_virtual_layer\n )\n if n_encoder_layers_in_virtual_layer > 0:\n assert (\n len(virtual_layer) == n_encoder_layers_in_virtual_layer\n ), \"Number of layers on each virtual layer must be the same.\"\n if n_decoder_layers_in_virtual_layer > 0:\n assert (\n len(virtual_layer) == n_decoder_layers_in_virtual_layer\n ), \"Number of layers on each virtual layer must be the same.\"\n if len(device_assignment) != n_actual_layers_per_virtual_layer:\n # only check if we are actually using pipeline parallelism\n assert (\n model_spec.n_encoder_layers % n_actual_layers_per_virtual_layer\n == 0\n ), (\n f\"Number of encoder layers ({model_spec.n_encoder_layers}) \"\n f\"must be divisible by number of layers on each virtual layer \"\n f\"({n_actual_layers_per_virtual_layer}).\"\n )\n assert (\n model_spec.n_decoder_layers % n_actual_layers_per_virtual_layer\n == 0\n ), (\n f\"Number of decoder layers ({model_spec.n_decoder_layers}) \"\n f\"must be divisible by number of layers on each virtual layer \"\n f\"({n_actual_layers_per_virtual_layer}).\"\n )\n # classify device assignment into linear, interleaved and other\n device_assignment_type = \"other\"\n if len(virtual_layer_devices) == n_devices:\n if virtual_layer_devices == list(range(n_devices)):\n device_assignment_type = \"linear\"\n else:\n n_chunks = len(virtual_layer_devices) // n_devices\n interleaved_assignment = list(range(n_devices)) * n_chunks\n if interleaved_assignment == virtual_layer_devices:\n device_assignment_type = \"interleaved\"\n if (\n device_assignment_type == \"interleaved\"\n and model_spec.n_decoder_layers == 0\n ):\n # interleaved device assignment is not supported for decoder only\n # models\n raise NotImplementedError(\n \"Interleaved device assignment is not supported \"\n \"for decoder only models.\"\n )\n valid_schedule_methods = [\"wait-free-cyclic\"]\n if device_assignment_type == \"linear\" and n_devices > 1:\n valid_schedule_methods.append(\"1F1B\")\n elif device_assignment_type == \"interleaved\":\n valid_schedule_methods.append(\"interleaved-1F1B\")\n n_chunks_per_device = len(virtual_layer_devices) // n_devices\n return (\n device_assignment_type,\n valid_schedule_methods,\n n_actual_layers_per_virtual_layer,\n n_chunks_per_device,\n )" } ]
import json
import logging
import multiprocessing as mp
import os
import time
import traceback
import torch
import pickle
from dataclasses import dataclass, field, fields
from queue import Empty
from typing import List, Optional
from torch.utils.data import DataLoader as PTDataLoader
from dynapipe.data_opt.cost_models import ProfileBasedCostModelWithRC
from dynapipe.data_opt.optimizer import DataAssignmentOptimizer
from dynapipe.model import DynaPipeCluster, TransformerModelSpec
from dynapipe.pipe.instructions import (
    deserialize_list_of_eps,
    serialize_list_of_eps,
)
from dynapipe.schedule_opt.execution_planner import ExecutionPlanner
from dynapipe.utils.logger import create_logger, logger
from .kv_redis import RedisKVStore
from .utils import validate_device_assignment
13,074
def _put_to_shared_kv_store( kv_store: RedisKVStore, key: str, data, logger=None ): # put execution plan into local kv store ack_key = key + "_ack" if logger is not None: logger.debug("Wait for data ack key: {}".format(ack_key)) # wait for ack key kv_store.get(ack_key) # remove ack key _checked_delete_key(kv_store, ack_key, logger=logger) if logger is not None: logger.debug("Set data key: {}".format(key)) # set data key kv_store.set(key, data) @dataclass class WorkerData: round_seqlen_multiple: Optional[int] = None logger: Optional[logging.Logger] = None kv_store: Optional[RedisKVStore] = None processed_batches: Optional[int] = None kv_buffer_size: Optional[int] = None seqlen_offset: Optional[int] = 0 def check_initialized(self): cls_fields = fields(self.__class__) for fld in cls_fields: if getattr(self, fld.name) is None: raise RuntimeError( "Worker data not initialized: {}".format(fld.name) ) @dataclass class PreprocessingWorkerData(WorkerData): # required at initialization: node_rank: Optional[int] = None profile_path: Optional[str] = None # filled later in worker init: dataopt: Optional[DataAssignmentOptimizer] = None exec_planner: Optional[ExecutionPlanner] = None partition_method: Optional[str] = None token_based_partition_mbs: Optional[int] = None disable_tsp: Optional[bool] = None schedule_method: Optional[str] = None disable_mb_permutation: Optional[bool] = None disable_scheduler_memory_limit: Optional[bool] = None enable_packing: Optional[bool] = None n_layers_per_stage: Optional[int] = None assigned_iters_per_node: Optional[int] = None node_size: Optional[int] = None def __post_init__(self): if self.node_rank is None: raise RuntimeError("node_rank must be set at initialization.") if self.profile_path is None: raise RuntimeError("profile_path must be set at initialization.") @dataclass class DataloaderWorkerData(WorkerData): # required at initialization: dp_rank: Optional[int] = None pp_rank: Optional[int] = None virtual_pp_rank: Optional[int] = None # filled later in worker init: dp_size: Optional[int] = None pp_size: Optional[int] = None virtual_pp_size: Optional[int] = None def __post_init__(self): if self.dp_rank is None: raise RuntimeError("dp_rank must be set at initialization.") if self.pp_rank is None: raise RuntimeError("pp_rank must be set at initialization.") if self.virtual_pp_rank is None: raise RuntimeError( "virtual_pp_rank must be " "set at initialization." ) class KVStoreMetaKeys: DP_SIZE = "data_parallel_size" TP_SIZE = "tensor_parallel_size" PP_SIZE = "pipeline_parallel_size" VIRTUAL_PP_SIZE = "virtual_pipeline_parallel_size" ZERO_STAGE = "zero_stage" NODE_SIZE = "node_size" MODEL_SPEC = "model_spec" N_EXECS = "n_executors" N_LAYERS_PER_STAGE = "n_layers_per_stage" N_CHUNKS_PER_DEVICE = "n_chunks_per_device" DEVICE_MEMORY_LIMIT = "device_memory_limit" PARTITION_METHOD = "partition_method" TOKEN_BASED_PARTITION_MBS = "token_based_partition_mbs" DISABLE_TSP = "disable_tsp" SCHEDULE_METHOD = "schedule_method" DISABLE_MB_PERMUTATION = "disable_mb_permutation" DISABLE_SCHEDULER_MEMORY_LIMIT = "disable_scheduler_memory_limit" ENABLE_PACKING = "enable_packing" PER_MB_MEM_FRAC = "per_mb_memory_fraction" CLUSTER_SPEC = "cluster_spec" DEV_ASSIGNMENT = "device_assignment" KV_BUFFER_SIZE = "kv_buffer_size" ROUND_SEQLEN_MULT = "round_seqlen_multiple" ASSIGNED_ITER_PER_NODE = "assigned_iters_per_node" SEQLEN_OFFSET = "seqlen_offset" MODEL_TYPE = "model_type" # used outside dataloader N_ITERS = "n_iters" @dataclass class TrainingSpec: cm_path: str cluster_spec: DynaPipeCluster
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 MANAGER_PROCESS_TIMEOUT = 1 RECEIVER_PROCESS_TIMEOUT = 1 KVSTORE_TIMEOUT = 1800 # 30 minutes # ONLY USED FOR DEBUG PURPOSES DEBUG_USE_DUMMY_EP = False DEBUG_DUMP_EP_STATS = os.getenv( "DYNAPIPE_DEBUG_DUMP_EP_STATS", "False" ).lower() in ("true", "1", "t") DEBUG_DUMP_EP_PREFIX = os.environ.get("DYNAPIPE_DEBUG_DUMP_EP_PREFIX", None) if DEBUG_DUMP_EP_STATS and DEBUG_DUMP_EP_PREFIX is None: raise ValueError( "DYNAPIPE_DEBUG_DUMP_EP_PREFIX must be set if " "DYNAPIPE_DEBUG_DUMP_EP_STATS is set." ) _kvstore_handle = None def _init_kv_store(is_master, logger=None): host = os.environ.get("DYNAPIPE_KV_HOST", "localhost") port = os.environ.get("DYNAPIPE_KV_PORT", 29500) if logger is not None: logger.debug( "Init kv store, is_master: {}, host: {}, port: {}".format( is_master, host, port ) ) # kv_store = torch.distributed.TCPStore( # "127.0.0.1", # port, # is_master=is_master, # timeout=timedelta(seconds=KVSTORE_TIMEOUT), # ) kv_store = RedisKVStore(host, port, is_master=is_master) return kv_store, host, port def _checked_delete_key(kv_store: RedisKVStore, key: str, logger=None): result = kv_store.delete_key(key) if not result: raise RuntimeError( "Internal error: failed to delete key " "{}.".format(key) ) if logger is not None: logger.debug("Deleted key: {}".format(key)) def _get_from_shared_kv_store( kv_store: RedisKVStore, key: str, reader_idx: int, n_total_readers: int, decode: bool = True, logger=None, ): reader_count_key = key + "_rc" reader_ack_key = key + "_r{}_ack".format(reader_idx) # wait for reader ack if logger is not None: logger.debug("Waiting for reader ack key: {}".format(reader_ack_key)) kv_store.get(reader_ack_key) if logger is not None: logger.debug( "Got reader ack key: {}, waiting for data key: {}".format( reader_ack_key, key ) ) data = kv_store.get(key) if logger is not None: logger.debug("Removing reader ack key: {}".format(reader_ack_key)) # remove reader ack _checked_delete_key(kv_store, reader_ack_key, logger=logger) # get reader count reader_count = kv_store.add(reader_count_key, 1) if reader_count == n_total_readers: if logger is not None: logger.debug( "Last reader, reset reader count: {}".format(reader_count_key) ) # reset reader count result_readers = kv_store.add(reader_count_key, -n_total_readers) assert result_readers == 0 if logger is not None: logger.debug("Last reader, remove data key: {}".format(key)) # remove data key _checked_delete_key(kv_store, key, logger=logger) if logger is not None: logger.debug("Last reader, set ack key: {}".format(key + "_ack")) # set all reader ack keys keys_to_reset = [ key + "_r{}_ack".format(i) for i in range(n_total_readers) ] if logger is not None: logger.debug("Last reader, reset keys: {}".format(keys_to_reset)) for reset_key in keys_to_reset: val = kv_store.add(reset_key, 1) # make sure the key is set got_val = int(kv_store.get(reset_key).decode()) if not val == got_val: raise RuntimeError( "Failed to set reader ack key: {}".format(reset_key) ) if logger is not None: logger.debug("Set reader ack key: {}".format(reset_key)) # set data ack key kv_store.add(key + "_ack", 1) if decode: return data.decode() return data def _put_to_shared_kv_store( kv_store: RedisKVStore, key: str, data, logger=None ): # put execution plan into local kv store ack_key = key + "_ack" if logger is not None: logger.debug("Wait for data ack key: {}".format(ack_key)) # wait for ack key kv_store.get(ack_key) # remove ack key _checked_delete_key(kv_store, 
ack_key, logger=logger) if logger is not None: logger.debug("Set data key: {}".format(key)) # set data key kv_store.set(key, data) @dataclass class WorkerData: round_seqlen_multiple: Optional[int] = None logger: Optional[logging.Logger] = None kv_store: Optional[RedisKVStore] = None processed_batches: Optional[int] = None kv_buffer_size: Optional[int] = None seqlen_offset: Optional[int] = 0 def check_initialized(self): cls_fields = fields(self.__class__) for fld in cls_fields: if getattr(self, fld.name) is None: raise RuntimeError( "Worker data not initialized: {}".format(fld.name) ) @dataclass class PreprocessingWorkerData(WorkerData): # required at initialization: node_rank: Optional[int] = None profile_path: Optional[str] = None # filled later in worker init: dataopt: Optional[DataAssignmentOptimizer] = None exec_planner: Optional[ExecutionPlanner] = None partition_method: Optional[str] = None token_based_partition_mbs: Optional[int] = None disable_tsp: Optional[bool] = None schedule_method: Optional[str] = None disable_mb_permutation: Optional[bool] = None disable_scheduler_memory_limit: Optional[bool] = None enable_packing: Optional[bool] = None n_layers_per_stage: Optional[int] = None assigned_iters_per_node: Optional[int] = None node_size: Optional[int] = None def __post_init__(self): if self.node_rank is None: raise RuntimeError("node_rank must be set at initialization.") if self.profile_path is None: raise RuntimeError("profile_path must be set at initialization.") @dataclass class DataloaderWorkerData(WorkerData): # required at initialization: dp_rank: Optional[int] = None pp_rank: Optional[int] = None virtual_pp_rank: Optional[int] = None # filled later in worker init: dp_size: Optional[int] = None pp_size: Optional[int] = None virtual_pp_size: Optional[int] = None def __post_init__(self): if self.dp_rank is None: raise RuntimeError("dp_rank must be set at initialization.") if self.pp_rank is None: raise RuntimeError("pp_rank must be set at initialization.") if self.virtual_pp_rank is None: raise RuntimeError( "virtual_pp_rank must be " "set at initialization." ) class KVStoreMetaKeys: DP_SIZE = "data_parallel_size" TP_SIZE = "tensor_parallel_size" PP_SIZE = "pipeline_parallel_size" VIRTUAL_PP_SIZE = "virtual_pipeline_parallel_size" ZERO_STAGE = "zero_stage" NODE_SIZE = "node_size" MODEL_SPEC = "model_spec" N_EXECS = "n_executors" N_LAYERS_PER_STAGE = "n_layers_per_stage" N_CHUNKS_PER_DEVICE = "n_chunks_per_device" DEVICE_MEMORY_LIMIT = "device_memory_limit" PARTITION_METHOD = "partition_method" TOKEN_BASED_PARTITION_MBS = "token_based_partition_mbs" DISABLE_TSP = "disable_tsp" SCHEDULE_METHOD = "schedule_method" DISABLE_MB_PERMUTATION = "disable_mb_permutation" DISABLE_SCHEDULER_MEMORY_LIMIT = "disable_scheduler_memory_limit" ENABLE_PACKING = "enable_packing" PER_MB_MEM_FRAC = "per_mb_memory_fraction" CLUSTER_SPEC = "cluster_spec" DEV_ASSIGNMENT = "device_assignment" KV_BUFFER_SIZE = "kv_buffer_size" ROUND_SEQLEN_MULT = "round_seqlen_multiple" ASSIGNED_ITER_PER_NODE = "assigned_iters_per_node" SEQLEN_OFFSET = "seqlen_offset" MODEL_TYPE = "model_type" # used outside dataloader N_ITERS = "n_iters" @dataclass class TrainingSpec: cm_path: str cluster_spec: DynaPipeCluster
model_spec: TransformerModelSpec
3
2023-11-08 07:58:20+00:00
16k
SqueezeAILab/LLMCompiler
src/llm_compiler/llm_compiler.py
[ { "identifier": "AsyncStatsCallbackHandler", "path": "src/callbacks/callbacks.py", "snippet": "class AsyncStatsCallbackHandler(AsyncCallbackHandler):\n \"\"\"Collect useful stats about the run.\n Add more stats as needed.\"\"\"\n\n def __init__(self, stream: bool = False) -> None:\n super().__init__()\n self.cnt = 0\n self.input_tokens = 0\n self.output_tokens = 0\n # same for gpt-3.5\n self.encoder = tiktoken.encoding_for_model(\"gpt-4\")\n self.stream = stream\n self.all_times = []\n self.start_time = 0\n\n async def on_chat_model_start(self, serialized, prompts, **kwargs):\n self.start_time = time.time()\n if self.stream:\n # if streaming mode, on_llm_end response is not collected\n # therefore, we need to count input token based on the\n # prompt length at the beginning\n self.cnt += 1\n self.input_tokens += len(self.encoder.encode(prompts[0][0].content))\n\n async def on_llm_new_token(self, token, *args, **kwargs):\n if self.stream:\n # if streaming mode, on_llm_end response is not collected\n # therefore, we need to manually count output token based on the\n # number of streamed out tokens\n self.output_tokens += 1\n\n async def on_llm_end(self, response, *args, **kwargs):\n self.all_times.append(round(time.time() - self.start_time, 2))\n if not self.stream:\n # if not streaming mode, on_llm_end response is collected\n # so we can use this stats directly\n token_usage = response.llm_output[\"token_usage\"]\n self.input_tokens += token_usage[\"prompt_tokens\"]\n self.output_tokens += token_usage[\"completion_tokens\"]\n self.cnt += 1\n\n def reset(self) -> None:\n self.cnt = 0\n self.input_tokens = 0\n self.output_tokens = 0\n self.all_times = []\n\n def get_stats(self) -> dict[str, int]:\n return {\n \"calls\": self.cnt,\n \"input_tokens\": self.input_tokens,\n \"output_tokens\": self.output_tokens,\n \"all_times\": self.all_times,\n }" }, { "identifier": "Chain", "path": "src/chains/chain.py", "snippet": "class Chain(Serializable, Runnable[Dict[str, Any], Dict[str, Any]], ABC):\n \"\"\"Abstract base class for creating structured sequences of calls to components.\n\n Chains should be used to encode a sequence of calls to components like\n models, document retrievers, other chains, etc., and provide a simple interface\n to this sequence.\n\n Copied from langchain v0.0.283.\n\n The Chain interface makes it easy to create apps that are:\n - Stateful: add Memory to any Chain to give it state,\n - Observable: pass Callbacks to a Chain to execute additional functionality,\n like logging, outside the main sequence of component calls,\n - Composable: the Chain API is flexible enough that it is easy to combine\n Chains with other components, including other Chains.\n\n The main methods exposed by chains are:\n - `__call__`: Chains are callable. The `__call__` method is the primary way to\n execute a Chain. This takes inputs as a dictionary and returns a\n dictionary output.\n - `run`: A convenience method that takes inputs as args/kwargs and returns the\n output as a string or object. 
This method can only be used for a subset of\n chains and cannot return as rich of an output as `__call__`.\n \"\"\"\n\n def invoke(\n self,\n input: Dict[str, Any],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Dict[str, Any]:\n config = config or {}\n return self(\n input,\n callbacks=config.get(\"callbacks\"),\n tags=config.get(\"tags\"),\n metadata=config.get(\"metadata\"),\n run_name=config.get(\"run_name\"),\n **kwargs,\n )\n\n async def ainvoke(\n self,\n input: Dict[str, Any],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Dict[str, Any]:\n if type(self)._acall == Chain._acall:\n # If the chain does not implement async, fall back to default implementation\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.invoke, input, config, **kwargs)\n )\n\n config = config or {}\n return await self.acall(\n input,\n callbacks=config.get(\"callbacks\"),\n tags=config.get(\"tags\"),\n metadata=config.get(\"metadata\"),\n run_name=config.get(\"run_name\"),\n **kwargs,\n )\n\n memory: Optional[BaseMemory] = None\n \"\"\"Optional memory object. Defaults to None.\n Memory is a class that gets called at the start\n and at the end of every chain. At the start, memory loads variables and passes\n them along in the chain. At the end, it saves any returned variables.\n There are many different types of memory - please see memory docs\n for the full catalog.\"\"\"\n callbacks: Callbacks = Field(default=None, exclude=True)\n \"\"\"Optional list of callback handlers (or callback manager). Defaults to None.\n Callback handlers are called throughout the lifecycle of a call to a chain,\n starting with on_chain_start, ending with on_chain_end or on_chain_error.\n Each custom chain can optionally call additional callback methods, see Callback docs\n for full details.\"\"\"\n callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)\n \"\"\"Deprecated, use `callbacks` instead.\"\"\"\n verbose: bool = Field(default_factory=_get_verbosity)\n \"\"\"Whether or not run in verbose mode. In verbose mode, some intermediate logs\n will be printed to the console. Defaults to `langchain.verbose` value.\"\"\"\n tags: Optional[List[str]] = None\n \"\"\"Optional list of tags associated with the chain. Defaults to None.\n These tags will be associated with each call to this chain,\n and passed as arguments to the handlers defined in `callbacks`.\n You can use these to eg identify a specific instance of a chain with its use case.\n \"\"\"\n metadata: Optional[Dict[str, Any]] = None\n \"\"\"Optional metadata associated with the chain. Defaults to None.\n This metadata will be associated with each call to this chain,\n and passed as arguments to the handlers defined in `callbacks`.\n You can use these to eg identify a specific instance of a chain with its use case.\n \"\"\"\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @property\n def _chain_type(self) -> str:\n raise NotImplementedError(\"Saving not supported for this chain type.\")\n\n @root_validator()\n def raise_callback_manager_deprecation(cls, values: Dict) -> Dict:\n \"\"\"Raise deprecation warning if callback_manager is used.\"\"\"\n if values.get(\"callback_manager\") is not None:\n if values.get(\"callbacks\") is not None:\n raise ValueError(\n \"Cannot specify both callback_manager and callbacks. 
\"\n \"callback_manager is deprecated, callbacks is the preferred \"\n \"parameter to pass in.\"\n )\n warnings.warn(\n \"callback_manager is deprecated. Please use callbacks instead.\",\n DeprecationWarning,\n )\n values[\"callbacks\"] = values.pop(\"callback_manager\", None)\n return values\n\n @validator(\"verbose\", pre=True, always=True)\n def set_verbose(cls, verbose: Optional[bool]) -> bool:\n \"\"\"Set the chain verbosity.\n\n Defaults to the global setting if not specified by the user.\n \"\"\"\n if verbose is None:\n return _get_verbosity()\n else:\n return verbose\n\n @property\n @abstractmethod\n def input_keys(self) -> List[str]:\n \"\"\"Keys expected to be in the chain input.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def output_keys(self) -> List[str]:\n \"\"\"Keys expected to be in the chain output.\"\"\"\n raise NotImplementedError\n\n def _validate_inputs(self, inputs: Dict[str, Any]) -> None:\n \"\"\"Check that all inputs are present.\"\"\"\n missing_keys = set(self.input_keys).difference(inputs)\n if missing_keys:\n raise ValueError(f\"Missing some input keys: {missing_keys}\")\n\n def _validate_outputs(self, outputs: Dict[str, Any]) -> None:\n missing_keys = set(self.output_keys).difference(outputs)\n if missing_keys:\n raise ValueError(f\"Missing some output keys: {missing_keys}\")\n\n @abstractmethod\n def _call(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n \"\"\"Execute the chain.\n\n This is a private method that is not user-facing. It is only called within\n `Chain.__call__`, which is the user-facing wrapper method that handles\n callbacks configuration and some input/output processing.\n\n Args:\n inputs: A dict of named inputs to the chain. Assumed to contain all inputs\n specified in `Chain.input_keys`, including any inputs added by memory.\n run_manager: The callbacks manager that contains the callback handlers for\n this run of the chain.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n raise NotImplementedError\n\n async def _acall(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n \"\"\"Asynchronously execute the chain.\n\n This is a private method that is not user-facing. It is only called within\n `Chain.acall`, which is the user-facing wrapper method that handles\n callbacks configuration and some input/output processing.\n\n Args:\n inputs: A dict of named inputs to the chain. Assumed to contain all inputs\n specified in `Chain.input_keys`, including any inputs added by memory.\n run_manager: The callbacks manager that contains the callback handlers for\n this run of the chain.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n raise NotImplementedError(\"Async call not supported for this chain type.\")\n\n def __call__(\n self,\n inputs: Union[Dict[str, Any], Any],\n return_only_outputs: bool = False,\n callbacks: Callbacks = None,\n *,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n run_name: Optional[str] = None,\n include_run_info: bool = False,\n ) -> Dict[str, Any]:\n \"\"\"Execute the chain.\n\n Args:\n inputs: Dictionary of inputs, or single input if chain expects\n only one param. 
Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n return_only_outputs: Whether to return only outputs in the\n response. If True, only new keys generated by this chain will be\n returned. If False, both input keys and new keys generated by this\n chain will be returned. Defaults to False.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n metadata: Optional metadata associated with the chain. Defaults to None\n include_run_info: Whether to include run info in the response. Defaults\n to False.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n inputs = self.prep_inputs(inputs)\n callback_manager = CallbackManager.configure(\n callbacks,\n self.callbacks,\n self.verbose,\n tags,\n self.tags,\n metadata,\n self.metadata,\n )\n new_arg_supported = inspect.signature(self._call).parameters.get(\"run_manager\")\n run_manager = callback_manager.on_chain_start(\n dumpd(self),\n inputs,\n name=run_name,\n )\n try:\n outputs = (\n self._call(inputs, run_manager=run_manager)\n if new_arg_supported\n else self._call(inputs)\n )\n except (KeyboardInterrupt, Exception) as e:\n run_manager.on_chain_error(e)\n raise e\n run_manager.on_chain_end(outputs)\n final_outputs: Dict[str, Any] = self.prep_outputs(\n inputs, outputs, return_only_outputs\n )\n if include_run_info:\n final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)\n return final_outputs\n\n async def acall(\n self,\n inputs: Union[Dict[str, Any], Any],\n return_only_outputs: bool = False,\n callbacks: Callbacks = None,\n *,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n run_name: Optional[str] = None,\n include_run_info: bool = False,\n ) -> Dict[str, Any]:\n \"\"\"Asynchronously execute the chain.\n\n Args:\n inputs: Dictionary of inputs, or single input if chain expects\n only one param. Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n return_only_outputs: Whether to return only outputs in the\n response. If True, only new keys generated by this chain will be\n returned. If False, both input keys and new keys generated by this\n chain will be returned. Defaults to False.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n metadata: Optional metadata associated with the chain. Defaults to None\n include_run_info: Whether to include run info in the response. Defaults\n to False.\n\n Returns:\n A dict of named outputs. 
Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n inputs = self.prep_inputs(inputs)\n callback_manager = AsyncCallbackManager.configure(\n callbacks,\n self.callbacks,\n self.verbose,\n tags,\n self.tags,\n metadata,\n self.metadata,\n )\n new_arg_supported = inspect.signature(self._acall).parameters.get(\"run_manager\")\n run_manager = await callback_manager.on_chain_start(\n dumpd(self),\n inputs,\n name=run_name,\n )\n try:\n outputs = (\n await self._acall(inputs, run_manager=run_manager)\n if new_arg_supported\n else await self._acall(inputs)\n )\n except (KeyboardInterrupt, Exception) as e:\n await run_manager.on_chain_error(e)\n raise e\n await run_manager.on_chain_end(outputs)\n final_outputs: Dict[str, Any] = self.prep_outputs(\n inputs, outputs, return_only_outputs\n )\n if include_run_info:\n final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)\n return final_outputs\n\n def prep_outputs(\n self,\n inputs: Dict[str, str],\n outputs: Dict[str, str],\n return_only_outputs: bool = False,\n ) -> Dict[str, str]:\n \"\"\"Validate and prepare chain outputs, and save info about this run to memory.\n\n Args:\n inputs: Dictionary of chain inputs, including any inputs added by chain\n memory.\n outputs: Dictionary of initial chain outputs.\n return_only_outputs: Whether to only return the chain outputs. If False,\n inputs are also added to the final outputs.\n\n Returns:\n A dict of the final chain outputs.\n \"\"\"\n self._validate_outputs(outputs)\n if self.memory is not None:\n self.memory.save_context(inputs, outputs)\n if return_only_outputs:\n return outputs\n else:\n return {**inputs, **outputs}\n\n def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:\n \"\"\"Validate and prepare chain inputs, including adding inputs from memory.\n\n Args:\n inputs: Dictionary of raw inputs, or single input if chain expects\n only one param. Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n\n Returns:\n A dictionary of all inputs, including those added by the chain's memory.\n \"\"\"\n if not isinstance(inputs, dict):\n _input_keys = set(self.input_keys)\n if self.memory is not None:\n # If there are multiple input keys, but some get set by memory so that\n # only one is not set, we can still figure out which key it is.\n _input_keys = _input_keys.difference(self.memory.memory_variables)\n if len(_input_keys) != 1:\n raise ValueError(\n f\"A single string input was passed in, but this chain expects \"\n f\"multiple inputs ({_input_keys}). When a chain expects \"\n f\"multiple inputs, please call it by passing in a dictionary, \"\n \"eg `chain({'foo': 1, 'bar': 2})`\"\n )\n inputs = {list(_input_keys)[0]: inputs}\n if self.memory is not None:\n external_context = self.memory.load_memory_variables(inputs)\n inputs = dict(inputs, **external_context)\n self._validate_inputs(inputs)\n return inputs\n\n @property\n def _run_output_key(self) -> str:\n if len(self.output_keys) != 1:\n raise ValueError(\n f\"`run` not supported when there is not exactly \"\n f\"one output key. 
Got {self.output_keys}.\"\n )\n return self.output_keys[0]\n\n def run(\n self,\n *args: Any,\n callbacks: Callbacks = None,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Convenience method for executing chain.\n\n The main difference between this method and `Chain.__call__` is that this\n method expects inputs to be passed directly in as positional arguments or\n keyword arguments, whereas `Chain.__call__` expects a single input dictionary\n with all the inputs\n\n Args:\n *args: If the chain expects a single input, it can be passed in as the\n sole positional argument.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n **kwargs: If the chain expects multiple inputs, they can be passed in\n directly as keyword arguments.\n\n Returns:\n The chain output.\n\n Example:\n .. code-block:: python\n\n # Suppose we have a single-input chain that takes a 'question' string:\n chain.run(\"What's the temperature in Boise, Idaho?\")\n # -> \"The temperature in Boise is...\"\n\n # Suppose we have a multi-input chain that takes a 'question' string\n # and 'context' string:\n question = \"What's the temperature in Boise, Idaho?\"\n context = \"Weather report for Boise, Idaho on 07/03/23...\"\n chain.run(question=question, context=context)\n # -> \"The temperature in Boise is...\"\n \"\"\"\n # Run at start to make sure this is possible/defined\n _output_key = self._run_output_key\n\n if args and not kwargs:\n if len(args) != 1:\n raise ValueError(\"`run` supports only one positional argument.\")\n return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[\n _output_key\n ]\n\n if kwargs and not args:\n return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[\n _output_key\n ]\n\n if not kwargs and not args:\n raise ValueError(\n \"`run` supported with either positional arguments or keyword arguments,\"\n \" but none were provided.\"\n )\n else:\n raise ValueError(\n f\"`run` supported with either positional arguments or keyword arguments\"\n f\" but not both. Got args: {args} and kwargs: {kwargs}.\"\n )\n\n async def arun(\n self,\n *args: Any,\n callbacks: Callbacks = None,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Convenience method for executing chain.\n\n The main difference between this method and `Chain.__call__` is that this\n method expects inputs to be passed directly in as positional arguments or\n keyword arguments, whereas `Chain.__call__` expects a single input dictionary\n with all the inputs\n\n\n Args:\n *args: If the chain expects a single input, it can be passed in as the\n sole positional argument.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. 
These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n **kwargs: If the chain expects multiple inputs, they can be passed in\n directly as keyword arguments.\n\n Returns:\n The chain output.\n\n Example:\n .. code-block:: python\n\n # Suppose we have a single-input chain that takes a 'question' string:\n await chain.arun(\"What's the temperature in Boise, Idaho?\")\n # -> \"The temperature in Boise is...\"\n\n # Suppose we have a multi-input chain that takes a 'question' string\n # and 'context' string:\n question = \"What's the temperature in Boise, Idaho?\"\n context = \"Weather report for Boise, Idaho on 07/03/23...\"\n await chain.arun(question=question, context=context)\n # -> \"The temperature in Boise is...\"\n \"\"\"\n if len(self.output_keys) != 1:\n raise ValueError(\n f\"`run` not supported when there is not exactly \"\n f\"one output key. Got {self.output_keys}.\"\n )\n elif args and not kwargs:\n if len(args) != 1:\n raise ValueError(\"`run` supports only one positional argument.\")\n return (\n await self.acall(\n args[0], callbacks=callbacks, tags=tags, metadata=metadata\n )\n )[self.output_keys[0]]\n\n if kwargs and not args:\n return (\n await self.acall(\n kwargs, callbacks=callbacks, tags=tags, metadata=metadata\n )\n )[self.output_keys[0]]\n\n raise ValueError(\n f\"`run` supported with either positional arguments or keyword arguments\"\n f\" but not both. Got args: {args} and kwargs: {kwargs}.\"\n )\n\n def dict(self, **kwargs: Any) -> Dict:\n \"\"\"Dictionary representation of chain.\n\n Expects `Chain._chain_type` property to be implemented and for memory to be\n null.\n\n Args:\n **kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict`\n method.\n\n Returns:\n A dictionary representation of the chain.\n\n Example:\n .. code-block:: python\n\n chain.dict(exclude_unset=True)\n # -> {\"_type\": \"foo\", \"verbose\": False, ...}\n \"\"\"\n if self.memory is not None:\n raise ValueError(\"Saving of memory is not yet supported.\")\n _dict = super().dict(**kwargs)\n _dict[\"_type\"] = self._chain_type\n return _dict\n\n def save(self, file_path: Union[Path, str]) -> None:\n \"\"\"Save the chain.\n\n Expects `Chain._chain_type` property to be implemented and for memory to be\n null.\n\n Args:\n file_path: Path to file to save the chain to.\n\n Example:\n .. 
code-block:: python\n\n chain.save(file_path=\"path/chain.yaml\")\n \"\"\"\n # Convert file to Path object.\n if isinstance(file_path, str):\n save_path = Path(file_path)\n else:\n save_path = file_path\n\n directory_path = save_path.parent\n directory_path.mkdir(parents=True, exist_ok=True)\n\n # Fetch dictionary to save\n chain_dict = self.dict()\n\n if save_path.suffix == \".json\":\n with open(file_path, \"w\") as f:\n json.dump(chain_dict, f, indent=4)\n elif save_path.suffix == \".yaml\":\n with open(file_path, \"w\") as f:\n yaml.dump(chain_dict, f, default_flow_style=False)\n else:\n raise ValueError(f\"{save_path} must be json or yaml\")\n\n def apply(\n self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None\n ) -> List[Dict[str, str]]:\n \"\"\"Call the chain on all inputs in the list.\"\"\"\n return [self(inputs, callbacks=callbacks) for inputs in input_list]" }, { "identifier": "JOINNER_REPLAN", "path": "src/llm_compiler/constants.py", "snippet": "JOINNER_REPLAN = \"Replan\"" }, { "identifier": "Planner", "path": "src/llm_compiler/planner.py", "snippet": "class Planner:\n def __init__(\n self,\n llm: BaseChatModel,\n example_prompt: str,\n example_prompt_replan: str,\n tools: Sequence[Union[Tool, StructuredTool]],\n stop: Optional[list[str]],\n ):\n self.llm = llm\n # different system prompt is needed when replanning\n # since they have different guidelines, and also examples provided by the user\n self.system_prompt = generate_llm_compiler_prompt(\n tools=tools,\n example_prompt=example_prompt,\n is_replan=False,\n )\n self.system_prompt_replan = generate_llm_compiler_prompt(\n tools=tools,\n example_prompt=example_prompt_replan,\n is_replan=True,\n )\n self.tools = tools\n self.output_parser = LLMCompilerPlanParser(tools=tools)\n self.stop = stop\n\n async def run_llm(\n self,\n inputs: dict[str, Any],\n is_replan: bool = False,\n callbacks: Callbacks = None,\n ) -> str:\n \"\"\"Run the LLM.\"\"\"\n if is_replan:\n system_prompt = self.system_prompt_replan\n assert \"context\" in inputs, \"If replanning, context must be provided\"\n human_prompt = f\"Question: {inputs['input']}\\n{inputs['context']}\\n\"\n else:\n system_prompt = self.system_prompt\n human_prompt = f\"Question: {inputs['input']}\"\n\n messages = [\n SystemMessage(content=system_prompt),\n HumanMessage(content=human_prompt),\n ]\n\n llm_response = await self.llm._call_async(\n messages,\n callbacks=callbacks,\n stop=self.stop,\n )\n log(\"LLMCompiler planner response: \\n\", llm_response.content, block=True)\n\n return llm_response.content\n\n async def plan(\n self, inputs: dict, is_replan: bool, callbacks: Callbacks = None, **kwargs: Any\n ):\n llm_response = await self.run_llm(\n inputs=inputs, is_replan=is_replan, callbacks=callbacks\n )\n llm_response = llm_response + \"\\n\"\n return self.output_parser.parse(llm_response)\n\n async def aplan(\n self,\n inputs: dict,\n task_queue: asyncio.Queue[Optional[str]],\n is_replan: bool,\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Plan:\n \"\"\"Given input, asynchronously decide what to do.\"\"\"\n all_callbacks = [\n LLMCompilerCallback(\n queue=task_queue,\n tools=self.tools,\n )\n ]\n if callbacks:\n all_callbacks.extend(callbacks)\n await self.run_llm(inputs=inputs, is_replan=is_replan, callbacks=all_callbacks)" }, { "identifier": "Task", "path": "src/llm_compiler/task_fetching_unit.py", "snippet": "class Task:\n idx: int\n name: str\n tool: Callable\n args: Collection[Any]\n dependencies: Collection[int]\n stringify_rule: 
Optional[Callable] = None\n thought: Optional[str] = None\n observation: Optional[str] = None\n is_join: bool = False\n\n async def __call__(self) -> Any:\n log(\"running task\")\n x = await self.tool(*self.args)\n log(\"done task\")\n return x\n\n def get_though_action_observation(\n self, include_action=True, include_thought=True, include_action_idx=False\n ) -> str:\n thought_action_observation = \"\"\n if self.thought and include_thought:\n thought_action_observation = f\"Thought: {self.thought}\\n\"\n if include_action:\n idx = f\"{self.idx}. \" if include_action_idx else \"\"\n if self.stringify_rule:\n # If the user has specified a custom stringify rule for the\n # function argument, use it\n thought_action_observation += f\"{idx}{self.stringify_rule(self.args)}\\n\"\n else:\n # Otherwise, we have a default stringify rule\n thought_action_observation += (\n f\"{idx}{self.name}\"\n f\"{_default_stringify_rule_for_arguments(self.args)}\\n\"\n )\n if self.observation is not None:\n thought_action_observation += f\"Observation: {self.observation}\\n\"\n return thought_action_observation" }, { "identifier": "TaskFetchingUnit", "path": "src/llm_compiler/task_fetching_unit.py", "snippet": "class TaskFetchingUnit:\n tasks: Dict[str, Task]\n tasks_done: Dict[str, asyncio.Event]\n remaining_tasks: set[str]\n\n def __init__(self):\n self.tasks = {}\n self.tasks_done = {}\n self.remaining_tasks = set()\n\n def set_tasks(self, tasks: dict[str, Any]):\n self.tasks.update(tasks)\n self.tasks_done.update({task_idx: asyncio.Event() for task_idx in tasks})\n self.remaining_tasks.update(set(tasks.keys()))\n\n def _all_tasks_done(self):\n return all(self.tasks_done[d].is_set() for d in self.tasks_done)\n\n def _get_all_executable_tasks(self):\n return [\n task_name\n for task_name in self.remaining_tasks\n if all(\n self.tasks_done[d].is_set() for d in self.tasks[task_name].dependencies\n )\n ]\n\n def _preprocess_args(self, task: Task):\n \"\"\"Replace dependency placeholders, i.e. 
${1}, in task.args with the actual observation.\"\"\"\n args = []\n for arg in task.args:\n arg = _replace_arg_mask_with_real_value(arg, task.dependencies, self.tasks)\n args.append(arg)\n task.args = args\n\n async def _run_task(self, task: Task):\n self._preprocess_args(task)\n if not task.is_join:\n observation = await task()\n task.observation = observation\n self.tasks_done[task.idx].set()\n\n async def schedule(self):\n \"\"\"Run all tasks in self.tasks in parallel, respecting dependencies.\"\"\"\n # run until all tasks are done\n while not self._all_tasks_done():\n # Find tasks with no dependencies or with all dependencies met\n executable_tasks = self._get_all_executable_tasks()\n\n for task_name in executable_tasks:\n asyncio.create_task(self._run_task(self.tasks[task_name]))\n self.remaining_tasks.remove(task_name)\n\n await asyncio.sleep(SCHEDULING_INTERVAL)\n\n async def aschedule(self, task_queue: asyncio.Queue[Optional[Task]], func):\n \"\"\"Asynchronously listen to task_queue and schedule tasks as they arrive.\"\"\"\n no_more_tasks = False # Flag to check if all tasks are received\n\n while True:\n if not no_more_tasks:\n # Wait for a new task to be added to the queue\n task = await task_queue.get()\n\n # Check for sentinel value indicating end of tasks\n if task is None:\n no_more_tasks = True\n else:\n # Parse and set the new tasks\n self.set_tasks({task.idx: task})\n\n # Schedule and run executable tasks\n executable_tasks = self._get_all_executable_tasks()\n\n if executable_tasks:\n for task_name in executable_tasks:\n asyncio.create_task(self._run_task(self.tasks[task_name]))\n self.remaining_tasks.remove(task_name)\n elif no_more_tasks and self._all_tasks_done():\n # Exit the loop if no more tasks are expected and all tasks are done\n break\n else:\n # If no executable tasks are found, sleep for the SCHEDULING_INTERVAL\n await asyncio.sleep(SCHEDULING_INTERVAL)" }, { "identifier": "StructuredTool", "path": "src/tools/base.py", "snippet": "class StructuredTool(BaseTool):\n \"\"\"Tool that can operate on any number of inputs.\"\"\"\n\n description: str = \"\"\n args_schema: Type[BaseModel] = Field(..., description=\"The tool schema.\")\n \"\"\"The input arguments' schema.\"\"\"\n func: Optional[Callable[..., Any]]\n \"\"\"The function to run when the tool is called.\"\"\"\n coroutine: Optional[Callable[..., Awaitable[Any]]] = None\n \"\"\"The asynchronous version of the function.\"\"\"\n stringify_rule: Optional[Callable[..., str]] = None\n\n # --- Runnable ---\n\n async def ainvoke(\n self,\n input: Union[str, Dict],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Any:\n if not self.coroutine:\n # If the tool does not implement async, fall back to default implementation\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.invoke, input, config, **kwargs)\n )\n\n return super().ainvoke(input, config, **kwargs)\n\n # --- Tool ---\n\n @property\n def args(self) -> dict:\n \"\"\"The tool's input arguments.\"\"\"\n return self.args_schema.schema()[\"properties\"]\n\n def _run(\n self,\n *args: Any,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Use the tool.\"\"\"\n if self.func:\n new_argument_supported = signature(self.func).parameters.get(\"callbacks\")\n return (\n self.func(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else self.func(*args, **kwargs)\n )\n raise NotImplementedError(\"Tool does not support 
sync\")\n\n async def _arun(\n self,\n *args: Any,\n run_manager: Optional[AsyncCallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> str:\n \"\"\"Use the tool asynchronously.\"\"\"\n if self.coroutine:\n new_argument_supported = signature(self.coroutine).parameters.get(\n \"callbacks\"\n )\n return (\n await self.coroutine(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else await self.coroutine(*args, **kwargs)\n )\n return await asyncio.get_running_loop().run_in_executor(\n None,\n self._run,\n partial(self._run, run_manager=run_manager, **kwargs),\n *args,\n )\n\n @classmethod\n def from_function(\n cls,\n func: Optional[Callable] = None,\n coroutine: Optional[Callable[..., Awaitable[Any]]] = None,\n name: Optional[str] = None,\n description: Optional[str] = None,\n return_direct: bool = False,\n args_schema: Optional[Type[BaseModel]] = None,\n infer_schema: bool = True,\n **kwargs: Any,\n ) -> StructuredTool:\n \"\"\"Create tool from a given function.\n\n A classmethod that helps to create a tool from a function.\n\n Args:\n func: The function from which to create a tool\n coroutine: The async function from which to create a tool\n name: The name of the tool. Defaults to the function name\n description: The description of the tool. Defaults to the function docstring\n return_direct: Whether to return the result directly or as a callback\n args_schema: The schema of the tool's input arguments\n infer_schema: Whether to infer the schema from the function's signature\n **kwargs: Additional arguments to pass to the tool\n\n Returns:\n The tool\n\n Examples:\n\n .. code-block:: python\n\n def add(a: int, b: int) -> int:\n \\\"\\\"\\\"Add two numbers\\\"\\\"\\\"\n return a + b\n tool = StructuredTool.from_function(add)\n tool.run(1, 2) # 3\n \"\"\"\n\n if func is not None:\n source_function = func\n elif coroutine is not None:\n source_function = coroutine\n else:\n raise ValueError(\"Function and/or coroutine must be provided\")\n name = name or source_function.__name__\n description = description or source_function.__doc__\n if description is None:\n raise ValueError(\n \"Function must have a docstring if description not provided.\"\n )\n\n # Description example:\n # search_api(query: str) - Searches the API for the query.\n sig = signature(source_function)\n description = f\"{name}{sig} - {description.strip()}\"\n _args_schema = args_schema\n if _args_schema is None and infer_schema:\n _args_schema = create_schema_from_function(f\"{name}Schema\", source_function)\n return cls(\n name=name,\n func=func,\n coroutine=coroutine,\n args_schema=_args_schema,\n description=description,\n return_direct=return_direct,\n **kwargs,\n )" }, { "identifier": "Tool", "path": "src/tools/base.py", "snippet": "class Tool(BaseTool):\n \"\"\"Tool that takes in function or coroutine directly.\"\"\"\n\n description: str = \"\"\n func: Optional[Callable[..., str]]\n \"\"\"The function to run when the tool is called.\"\"\"\n coroutine: Optional[Callable[..., Awaitable[str]]] = None\n \"\"\"The asynchronous version of the function.\"\"\"\n stringify_rule: Optional[Callable[..., str]] = None\n\n # --- Runnable ---\n\n async def ainvoke(\n self,\n input: Union[str, Dict],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Any:\n if not self.coroutine:\n # If the tool does not implement async, fall back to default implementation\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.invoke, input, 
config, **kwargs)\n )\n\n return super().ainvoke(input, config, **kwargs)\n\n # --- Tool ---\n\n @property\n def args(self) -> dict:\n \"\"\"The tool's input arguments.\"\"\"\n if self.args_schema is not None:\n return self.args_schema.schema()[\"properties\"]\n # For backwards compatibility, if the function signature is ambiguous,\n # assume it takes a single string input.\n return {\"tool_input\": {\"type\": \"string\"}}\n\n def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:\n \"\"\"Convert tool input to pydantic model.\"\"\"\n args, kwargs = super()._to_args_and_kwargs(tool_input)\n # For backwards compatibility. The tool must be run with a single input\n all_args = list(args) + list(kwargs.values())\n if len(all_args) != 1:\n raise ToolException(\n f\"Too many arguments to single-input tool {self.name}.\"\n f\" Args: {all_args}\"\n )\n return tuple(all_args), {}\n\n def _run(\n self,\n *args: Any,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Use the tool.\"\"\"\n if self.func:\n new_argument_supported = signature(self.func).parameters.get(\"callbacks\")\n return (\n self.func(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else self.func(*args, **kwargs)\n )\n raise NotImplementedError(\"Tool does not support sync\")\n\n async def _arun(\n self,\n *args: Any,\n run_manager: Optional[AsyncCallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Use the tool asynchronously.\"\"\"\n if self.coroutine:\n new_argument_supported = signature(self.coroutine).parameters.get(\n \"callbacks\"\n )\n return (\n await self.coroutine(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else await self.coroutine(*args, **kwargs)\n )\n else:\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self._run, run_manager=run_manager, **kwargs), *args\n )\n\n # TODO: this is for backwards compatibility, remove in future\n def __init__(\n self, name: str, func: Optional[Callable], description: str, **kwargs: Any\n ) -> None:\n \"\"\"Initialize tool.\"\"\"\n super(Tool, self).__init__(\n name=name, func=func, description=description, **kwargs\n )\n\n @classmethod\n def from_function(\n cls,\n func: Optional[Callable],\n name: str, # We keep these required to support backwards compatibility\n description: str,\n return_direct: bool = False,\n args_schema: Optional[Type[BaseModel]] = None,\n coroutine: Optional[\n Callable[..., Awaitable[Any]]\n ] = None, # This is last for compatibility, but should be after func\n **kwargs: Any,\n ) -> Tool:\n \"\"\"Initialize tool from a function.\"\"\"\n if func is None and coroutine is None:\n raise ValueError(\"Function and/or coroutine must be provided\")\n return cls(\n name=name,\n func=func,\n coroutine=coroutine,\n description=description,\n return_direct=return_direct,\n args_schema=args_schema,\n **kwargs,\n )" }, { "identifier": "log", "path": "src/utils/logger_utils.py", "snippet": "def log(self, latency: float, answer: str, label: str, key: str) -> None:\n self._latency_dict[key].append(latency)\n self._answer_dict[key].append(answer)\n self._label_dict[key].append(label)" } ]
import asyncio
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union, cast

from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
)
from langchain.llms import BaseLLM
from langchain.prompts.base import StringPromptValue

from src.callbacks.callbacks import AsyncStatsCallbackHandler
from src.chains.chain import Chain
from src.llm_compiler.constants import JOINNER_REPLAN
from src.llm_compiler.planner import Planner
from src.llm_compiler.task_fetching_unit import Task, TaskFetchingUnit
from src.tools.base import StructuredTool, Tool
from src.utils.logger_utils import log
11,119
class LLMCompilerAgent:
    """Self defined agent for LLM Compiler."""

    def __init__(self, llm: BaseLLM) -> None:
        self.llm = llm

    async def arun(self, prompt: str, callbacks=None) -> str:
        return await self.llm.agenerate_prompt(
            prompts=[StringPromptValue(text=prompt)],
            stop=None,
            callbacks=callbacks,
        )


class LLMCompiler(Chain, extra="allow"):
    """LLMCompuler Engine."""

    """The step container to use."""
    input_key: str = "input"
    output_key: str = "output"

    def __init__(
        self,
        tools: Sequence[Union[Tool, StructuredTool]],
        planner_llm: BaseLLM,
        planner_example_prompt: str,
        planner_example_prompt_replan: Optional[str],
        planner_stop: Optional[list[str]],
        planner_stream: bool,
        agent_llm: BaseLLM,
        joinner_prompt: str,
        joinner_prompt_final: Optional[str],
        max_replans: int,
        benchmark: bool,
        **kwargs,
    ) -> None:
        """
        Args:
            tools: List of tools to use.
            max_replans: Maximum number of replans to do.
            benchmark: Whether to collect benchmark stats.

            Planner Args:
                planner_llm: LLM to use for planning.
                planner_example_prompt: Example prompt for planning.
                planner_example_prompt_replan: Example prompt for replanning.
                    Assign this if you want to use different example prompt for replanning.
                    If not assigned, default to `planner_example_prompt`.
                planner_stop: Stop tokens for planning.
                planner_stream: Whether to stream the planning.

            Agent Args:
                agent_llm: LLM to use for agent.
                joinner_prompt: Prompt to use for joinner.
                joinner_prompt_final: Prompt to use for joinner at the final replanning iter.
                    If not assigned, default to `joinner_prompt`.
        """
        super().__init__(**kwargs)

        if not planner_example_prompt_replan:
            log(
                "Replan example prompt not specified, using the same prompt as the planner."
            )
            planner_example_prompt_replan = planner_example_prompt
class LLMCompilerAgent:
    """Self defined agent for LLM Compiler."""

    def __init__(self, llm: BaseLLM) -> None:
        self.llm = llm

    async def arun(self, prompt: str, callbacks=None) -> str:
        return await self.llm.agenerate_prompt(
            prompts=[StringPromptValue(text=prompt)],
            stop=None,
            callbacks=callbacks,
        )


class LLMCompiler(Chain, extra="allow"):
    """LLMCompuler Engine."""

    """The step container to use."""
    input_key: str = "input"
    output_key: str = "output"

    def __init__(
        self,
        tools: Sequence[Union[Tool, StructuredTool]],
        planner_llm: BaseLLM,
        planner_example_prompt: str,
        planner_example_prompt_replan: Optional[str],
        planner_stop: Optional[list[str]],
        planner_stream: bool,
        agent_llm: BaseLLM,
        joinner_prompt: str,
        joinner_prompt_final: Optional[str],
        max_replans: int,
        benchmark: bool,
        **kwargs,
    ) -> None:
        """
        Args:
            tools: List of tools to use.
            max_replans: Maximum number of replans to do.
            benchmark: Whether to collect benchmark stats.

            Planner Args:
                planner_llm: LLM to use for planning.
                planner_example_prompt: Example prompt for planning.
                planner_example_prompt_replan: Example prompt for replanning.
                    Assign this if you want to use different example prompt for replanning.
                    If not assigned, default to `planner_example_prompt`.
                planner_stop: Stop tokens for planning.
                planner_stream: Whether to stream the planning.

            Agent Args:
                agent_llm: LLM to use for agent.
                joinner_prompt: Prompt to use for joinner.
                joinner_prompt_final: Prompt to use for joinner at the final replanning iter.
                    If not assigned, default to `joinner_prompt`.
        """
        super().__init__(**kwargs)

        if not planner_example_prompt_replan:
            log(
                "Replan example prompt not specified, using the same prompt as the planner."
            )
            planner_example_prompt_replan = planner_example_prompt
self.planner = Planner(
3
2023-12-06 21:12:54+00:00
16k
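Read together, the fields of the record above describe one next-line completion example: the context list holds candidate cross-file snippets, import_statement and cropped_code give the in-file prefix, next_line is the held-out target ("self.planner = Planner("), and gold_snippet_index (3 here) presumably points at the context snippet the completion depends on. Below is a minimal sketch of how such a record could be stitched into a prompt and checked; the record dict, helper names, and exact-match scoring are illustrative assumptions, not part of the dataset's own tooling.

# Illustrative sketch only: assumes one record has already been parsed into a
# plain dict keyed by the field names shown above (context, import_statement,
# cropped_code, next_line, gold_snippet_index). The prompt layout and the
# exact-match check are assumptions, not an official evaluation harness.
from typing import Any, Dict


def build_prompt(record: Dict[str, Any]) -> str:
    # Prepend the gold cross-file snippet, then the file's imports and cropped code.
    gold = record["context"][record["gold_snippet_index"]]
    header = f"# {gold['path']}\n{gold['snippet']}\n"
    return "\n".join([header, record["import_statement"], record["cropped_code"]])


def exact_match(prediction: str, record: Dict[str, Any]) -> bool:
    # Score only the first non-empty generated line against the held-out next_line.
    lines = [line for line in prediction.splitlines() if line.strip()]
    first = lines[0].strip() if lines else ""
    return first == record["next_line"].strip()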
bytedance/ImageDream
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return 
torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from functools import partial from einops import rearrange, repeat from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import ( AutoencoderKL, IdentityFirstStage, VQModelInterface, ) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like, ) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl, ) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import ( count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat, )
12369
opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config):
model = instantiate_from_config(config)
14
2023-12-13 21:09:37+00:00
16k
allenai/unified-io-2
t5x/models_test.py
[ { "identifier": "decoding", "path": "t5x/decoding.py", "snippet": "NEG_INF = np.array(-1.0e7)\nMIN_TEMPERATURE = np.array(1e-4)\nclass DecodingState:\nclass SamplingLoopState:\nclass BeamState:\ndef _is_tracer(value: Any):\ndef temperature_sample(\n inputs: jnp.ndarray,\n cache: Mapping[str, jnp.ndarray],\n tokens_to_logits: Callable[[DecodingState],\n Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]],\n eos_id: int,\n decode_rng: Optional[jnp.ndarray] = None,\n num_decodes: int = 1,\n temperature: Union[float, jnp.ndarray] = 1.0,\n topk: int = 1,\n topp: float = 0.0,\n cache_offset: int = 0,\n initial_index: Optional[jnp.ndarray] = None,\n max_decode_steps: Optional[Union[int, jnp.ndarray]] = None,\n max_decode_steps_hard_limit: Optional[int] = None,\n rescale_log_probs: bool = True,\n state_callback_fn: Optional[Callable[[SamplingLoopState],\n SamplingLoopState]] = None,\n logit_callback_fn: Optional[Callable[[jnp.ndarray, SamplingLoopState],\n jnp.ndarray]] = None\n) -> Tuple[jnp.ndarray, jnp.ndarray]:\ndef _temperature_sample_single_trial(\n inputs: jnp.ndarray,\n cache: Mapping[str, jnp.ndarray],\n tokens_to_logits: Callable[[DecodingState],\n Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]],\n eos_id: int,\n prng_key: jnp.ndarray,\n temperature: Union[float, jnp.ndarray] = 1.0,\n topk: int = 20,\n topp: Union[float, jnp.ndarray] = 0.0,\n initial_index: Optional[jnp.ndarray] = None,\n max_decode_steps: Optional[Union[int, jnp.ndarray]] = None,\n rescale_log_probs: bool = True,\n state_callback_fn: Optional[Callable[[SamplingLoopState],\n SamplingLoopState]] = None,\n logit_callback_fn: Optional[Callable[[jnp.ndarray, SamplingLoopState],\n jnp.ndarray]] = None\n) -> jnp.ndarray:\n def sampling_loop_cond_fn(state: SamplingLoopState) -> bool:\n def sampling_loop_body_fn(state: SamplingLoopState) -> SamplingLoopState:\n def sample_logits_with_nonzero_temperature(logits):\n def sample_logits_with_zero_temperature(logits):\ndef brevity_penalty(alpha: float, length: int) -> jnp.ndarray:\ndef cache_map(fn, cache, apply_to_index: bool = False):\ndef add_beam_dim(x: jnp.ndarray,\n beam_size: int,\n offset: int = 0) -> jnp.ndarray:\ndef flatten_beam_dim(x: jnp.ndarray, offset: int = 0) -> jnp.ndarray:\ndef unflatten_beam_dim(x: jnp.ndarray,\n batch_size: int,\n beam_size: int,\n offset: int = 0) -> jnp.ndarray:\ndef flat_batch_beam_expand(x: jnp.ndarray,\n beam_size: int,\n offset: int = 0) -> jnp.ndarray:\ndef cache_gather_beams(nested: PyTreeDef,\n beam_indices: jnp.ndarray,\n batch_size: int,\n old_beam_size: int,\n new_beam_size: int,\n one_hot: bool = True,\n offset: int = 0) -> jnp.ndarray:\n def gather_fn(x):\n def gather_fn(x):\n def gather_fn(x):\n def gather_fn(x):\ndef gather_beams(nested: PyTreeDef,\n beam_indices: jnp.ndarray,\n batch_size: int,\n old_beam_size: int,\n new_beam_size: int,\n one_hot: bool = True) -> jnp.ndarray:\n def gather_fn(x):\n def gather_fn(x):\ndef top_k_two_stage(x, k):\ndef gather_topk_beams(nested: PyTreeDef, score_or_log_prob: jnp.ndarray,\n batch_size: int, new_beam_size: int) -> jnp.ndarray:\ndef beam_init(batch_size: int,\n beam_size: int,\n max_decode_len: int,\n cache: Mapping[str, jnp.ndarray],\n offset: int = 0) -> BeamState:\ndef beam_search(inputs: jnp.ndarray,\n cache: Mapping[str, jnp.ndarray],\n tokens_to_logits: Callable[[DecodingState],\n Tuple[jnp.ndarray,\n Mapping[str, jnp.ndarray]]],\n eos_id: int,\n num_decodes: int = 4,\n alpha: float = 0.6,\n max_decode_len: Optional[int] = None,\n decode_rng: Optional[jnp.ndarray] = None,\n 
cache_offset: int = 0) -> Tuple[jnp.ndarray, jnp.ndarray]:\n def beam_search_loop_cond_fn(state: BeamState) -> bool:\n def beam_search_loop_body_fn(state: BeamState) -> BeamState:" }, { "identifier": "models", "path": "t5x/models.py", "snippet": "class TokensIdsToLogitsCallable(typing_extensions.Protocol):\nclass DecodeFnCallable(typing_extensions.Protocol):\nclass BaseModel(abc.ABC):\nclass BaseTransformerModel(BaseModel):\nclass EncoderDecoderModel(BaseTransformerModel):\nclass DecoderOnlyModel(BaseTransformerModel):\n def __call__(\n self, decoding_state: decoding.DecodingState\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def __call__(self, *, inputs: jnp.ndarray, cache: Mapping[str, jnp.ndarray],\n tokens_to_logits: TokensIdsToLogitsCallable, eos_id: int,\n num_decodes: int, decode_rng: Optional[jax.random.KeyArray],\n cache_offset: int, **kwargs) -> Tuple[jnp.ndarray, jnp.ndarray]:\n def __init__(self, optimizer_def: optimizers.OptimizerDefType):\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def eval_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def predict_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None) -> jnp.ndarray:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False) -> jnp.ndarray:\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def __init__(\n self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: Optional[DecodeFnCallable] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[Union[\n float, int, str, losses.SpecialLossNormalizingFactor]] = None,\n ):\n def input_vocabulary(self):\n def output_vocabulary(self):\n def decode_fn(self):\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None) -> jnp.ndarray:\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def _compute_metrics(\n self,\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n mask: jnp.ndarray,\n loss: jnp.ndarray,\n z_loss: Optional[jnp.ndarray] = None,\n segment_ids: Optional[Mapping[str, jnp.ndarray]] = None,\n ) -> MetricsMap:\n def __init__(\n self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: DecodeFnCallable = decoding.beam_search,\n feature_converter_cls: Optional[Callable[...,\n seqio.FeatureConverter]] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None,\n ):\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> 
flax_scope.FrozenVariableDict:\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None,\n mutable: flax_scope.CollectionFilter = False,\n other_variables: Optional[PyTreeDef] = None,\n ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, flax_scope.FrozenVariableDict]]:\n def _compute_logits_from_slice(\n self, decoding_state: decoding.DecodingState, params: PyTreeDef,\n encoded_inputs: jnp.ndarray, raw_inputs: jnp.ndarray,\n max_decode_length: int) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n prompt_with_targets: bool = False\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False,\n ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, Mapping[str, Any]]]:\n def __init__(\n self,\n module: nn.Module,\n vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: DecodeFnCallable = decoding.temperature_sample,\n inputs_bidirectional_attention: bool = False,\n feature_converter_cls: Optional[Callable[...,\n seqio.FeatureConverter]] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None,\n ):\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def _get_decoder_causal_attention(self, batch):\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None,\n mutable: flax_scope.CollectionFilter = False) -> jnp.ndarray:\n def _compute_logits_from_slice(\n self,\n decoding_state: decoding.DecodingState,\n params: PyTreeDef,\n max_decode_length: int,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False) -> jnp.ndarray:\n def _compute_kv_cache(\n self,\n params: PyTreeDef,\n inputs: jnp.ndarray,\n inputs_lengths: jnp.ndarray,\n decoder_causal_attention: jnp.ndarray,\n ) -> PyTreeDef:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n *,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\ndef remove_prefix(sequence: jnp.ndarray,\n prefix_length: jnp.ndarray) -> jnp.ndarray:\ndef compute_weighted_accuracy(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n weights: Optional[jnp.ndarray] = None) -> Tuple[jnp.ndarray, jnp.ndarray]:\ndef compute_metrics(logits: jnp.ndarray, targets: jnp.ndarray,\n weights: jnp.ndarray, loss: jnp.ndarray,\n weight_sum: jnp.ndarray,\n additional_metrics: MetricsMap) -> MetricsMap:\ndef compute_base_metrics(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n mask: jnp.ndarray,\n loss: jnp.ndarray,\n z_loss: Optional[jnp.ndarray] = None,\n segment_ids: Optional[Mapping[str, jnp.ndarray]] = None,\n) -> MetricsMap:\ndef get_input_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\ndef 
get_output_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\n FEATURE_CONVERTER_CLS: Callable[..., seqio.FeatureConverter]\n FEATURE_CONVERTER_CLS = seqio.EncDecFeatureConverter\n FEATURE_CONVERTER_CLS = seqio.DecoderFeatureConverter" }, { "identifier": "partitioning", "path": "t5x/partitioning.py", "snippet": "class AxisNames(tuple):\nclass LocalChunkInfo:\nclass LocalChunker:\nclass DataLayout:\nclass BasePartitioner(metaclass=abc.ABCMeta):\nclass PjittedFnWithContext(PartitionedCallable):\nclass BasePjitPartitioner(BasePartitioner):\nclass PjitPartitioner(BasePjitPartitioner):\n def __new__(cls, *names):\n def __repr__(self):\ndef pjit(\n fun: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = (),\n backend: Optional[str] = None):\ndef pjit_with_cpu_fallback(\n fun: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = (),\n backend: Optional[str] = None):\ndef with_sharding_constraint(x, axis_resources):\ndef bounds_from_last_device(\n last_device: jax.lib.xla_client.Device) -> HardwareMesh:\ndef get_coords(device: jax.lib.xla_client.Device) -> HardwareMesh:\ndef global_mesh_defined():\ndef get_mesh(model_parallel_submesh: HardwareMesh,\n input_devices: Sequence[JaxDevice] = (),\n input_local_devices: Sequence[JaxDevice] = (),\n tile_by_host_if_needed: bool = True,\n backend: Optional[str] = None) -> Mesh:\n def dh_dd_mh_md(g: int, m: int, l: int) -> Tuple[int, int, int, int]:\ndef get_cpu_mesh() -> Mesh:\ndef get_gpu_mesh(num_partitions: int) -> Mesh:\ndef default_mesh(num_partitions: int,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n backend: Optional[str] = None) -> Mesh:\n def __init__(self, global_mesh: Mesh):\n def get_local_chunk_info(\n self, global_shape: Tuple[int, ...],\n mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo:\ndef standard_logical_axis_rules(\n activation_partitioning_dims: int = 1,\n parameter_partitioning_dims: int = 1,\n additional_rules: Optional[LogicalAxisRules] = None) -> LogicalAxisRules:\ndef _id_fn(x, ix):\n def __init__(self,\n num_partitions: Optional[int] = None,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n params_on_devices: bool = True,\n backend: Optional[str] = None):\n def mesh(self) -> Mesh:\n def data_partition_spec(self) -> PartitionSpec:\n def get_data_layout(self,\n batch_size: Optional[int] = None,\n host_index: Optional[int] = None) -> DataLayout:\n def get_local_chunk_info(\n self, global_shape: Tuple[int, ...],\n mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo:\n def params_on_devices(self):\n def move_params_to_devices(self, train_state: TrainState,\n train_state_axes: TrainState) -> TrainState:\n def _local_chunker(self):\n def get_logical_axes(self, train_state: TrainState) -> TrainState:\n def get_mesh_axes(self, train_state: TrainState) -> TrainState:\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PartitionedCallable:\n def compile(self, partitioned_fn: PartitionedCallable,\n *args) -> CompiledPartitionedCallable:\n def __init__(self,\n pjitted_fn,\n partition_mesh: Mesh,\n logical_axis_rules: flax_partitioning.LogicalRules = ()):\n def __call__(self, 
*args):\n def lower(self, *args):\n def _local_chunker(self) -> LocalChunker:\n def mesh(self) -> Mesh:\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PjittedFnWithContext:\n def compile(self, partitioned_fn: PjittedFnWithContext,\n *args) -> CompiledPartitionedCallable:\n def __init__(self,\n num_partitions: Optional[int] = None,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n params_on_devices: bool = True,\n backend: Optional[str] = None,\n logical_axis_rules: Optional[LogicalAxisRules] = None,\n use_cpu_pjit: Optional[bool] = False):\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PjittedFnWithContext:\n def logical_axis_rules(self):\n def get_logical_axes(self, train_state: TrainState) -> TrainState:\n def get_mesh_axes(self, train_state: TrainState) -> TrainState:\n def _logical_to_mesh_axes(param_name, logical_axes):" }, { "identifier": "test_utils", "path": "t5x/test_utils.py", "snippet": "class CpuDevice:\nclass GpuDevice:\nclass TpuDevice:\n class DummyVocab:\ndef coords_to_idx(coords: Tuple[int, ...], bounds: Tuple[int, ...]) -> int:\ndef make_devices(nx: int,\n ny: int,\n nz: int,\n nc: int = 2,\n host_layout: Tuple[int, ...] = (2, 2, 1, 2),\n kind='TPU v3'):\ndef get_t5_test_model(**config_overrides) -> models.EncoderDecoderModel:\ndef with_mesh(named_shape: MeshSpec) -> Generator[None, None, None]:\ndef create_global_mesh(mesh_shape, axis_names):\ndef get_fake_vocab():\ndef get_fake_tokenized_dataset(*_, split='validation', **__):\ndef assert_equal(a, b):\ndef assert_same(tree_a, tree_b):\ndef get_train_state_from_variables(variables,\n optimizer_def=adafactor.Adafactor(0.0)):\n_FAKE_TOKENIZED_DATASET = {\n 'train': [\n {\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 16),\n 'inputs_pretokenized': 'complete: this',\n 'targets': (3, 8, 6, 3, 5, 10),\n 'targets_pretokenized': 'is a test'\n },\n {\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 16),\n 'inputs_pretokenized': 'complete: that',\n 'targets': (17, 5, 6, 3, 5, 10),\n 'targets_pretokenized': 'was a test'\n },\n {\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 16),\n 'inputs_pretokenized': 'complete: those',\n 'targets': (17, 4, 23, 4, 10, 6),\n 'targets_pretokenized': 'were tests'\n },\n ],\n # Notice that we repeat consecutively each examples 4 times,\n # this needed for tests like infer_tests to validate determinism.\n 'validation': [{\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 16),\n 'inputs_pretokenized': 'complete: this',\n 'targets': (3, 8, 6, 3, 5, 3, 25, 5),\n 'targets_pretokenized': 'is a validation',\n }] * 4 + [{\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 17),\n 'inputs_pretokenized': 'complete: that',\n 'targets': (17, 5, 6, 3, 5, 22, 7, 24),\n 'targets_pretokenized': 'was another validation',\n }] * 4\n}" }, { "identifier": "trainer", "path": "t5x/trainer.py", "snippet": "def _merge_metrics(a, b):\ndef merge_metrics(a, b):\n def result(self) -> Mapping[str, Array]:\n def result(self) -> Mapping[str, clu.values.Value]:\n def result(self) -> float:\n def __call__(\n self,\n step: jnp.ndarray,\n ) -> jnp.ndarray:\n def __call__(self, metrics: MetricMapType, duration: float,\n num_steps: int) -> Mapping[str, jnp.ndarray]:\n def __call__(\n self, train_state: train_state_lib.TrainState,\n batch: 
BatchType) -> Tuple[train_state_lib.TrainState, MetricMapType]:\n def __call__(self, train_state: train_state_lib.TrainState,\n batch: jnp.ndarray) -> MetricMapType:\n def compute_metrics(\n self, gradients: ModelWeights,\n old_train_state: train_state_lib.TrainState,\n new_train_state: train_state_lib.TrainState) -> MutableMetricMapType:\n def _make_rms_metrics(name, tree):\n def _make_max_metrics(name, tree):\n def compute_metrics(\n self, gradients: ModelWeights,\n old_train_state: train_state_lib.TrainState,\n new_train_state: train_state_lib.TrainState) -> MutableMetricMapType:\n def __init__(self):\n def close(self):\n def __del__(self):\n def _get_completion_future(self, block_on: PyTreeDef = ()) -> TimeFuture:\n def _get_completion_time():\n def start(self, block_on: PyTreeDef = ()):\n def stop(self, block_on: PyTreeDef = ()) -> TimeFuture:\n def __init__(self, name: str, summary_dir: Optional[str] = None, log_to_wandb=False):\n def __del__(self):\n def close(self):\n def summary_writer(self) -> metric_writers.MetricWriter:\n def write_scalar(self, key: str, val: metric_writers.interface.Scalar,\n step: int):\n def write_scalars(self, step: int,\n scalars: Mapping[str, metric_writers.interface.Scalar]):\n def start_duration_timer(self, block_on: PyTreeDef = ()):\n def write_metrics_summary(self, metrics: MetricMapType, step: int,\n num_steps: int) -> MetricValueMapFuture:\n def _summarize_and_write():\n def _ensure_not_on_device(x):\n def flush(self):\n def __init__(self, model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str], summary_dir: Optional[str],\n train_state_axes: Any, rng: Rng,\n use_wandb=False, packing_strategy=None, log_weights=None):\n def __enter__(self):\n def __exit__(self, exc_type, exc_value, traceback):\n def close(self):\n def _get_step_rng(self, step: int) -> Rng:\n def train_state(self):\n def train_state(self, train_state: PyTreeDef):\n def _weight_metric_fn(self):\n def _get_weight_metrics_fn(_params):\n def train(self,\n batch_iter: Union[Iterator[BatchType],\n clu.data.dataset_iterator.DatasetIterator],\n num_steps: int,\n start_step: Optional[int] = None) -> ArrayMapFuture:\n def compile_train(self, batch: ElementSpec) -> None:\n def eval(\n self, batch_iters: Mapping[str,\n Iterator[BatchType]], pbar_nsteps=None) -> Mapping[str, Array]:\n def compile_eval(self, batches: Mapping[str, BatchType]) -> None:\n def _partitioned_train_step(self) -> PartitionedTrainCallable:\n def _partitioned_eval_step(self) -> PartitionedEvalCallable:\ndef accumulate_grads_microbatched(\n model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n batch: BatchType,\n dropout_rng: Rng,\n num_microbatches: Optional[int],\n data_partition_spec: PartitionSpec = PartitionSpec(\"data\"),\n loss_fn_args=None\n) -> Tuple[train_state_lib.TrainState, MutableMetricMapType,\n def get_microbatch(batch: BatchType, idx: int) -> Mapping[str, jnp.ndarray]:\n def metrics_and_grad(loop_cnt, dropout_rng, flax_mutables=None):\n def per_microbatch_train_step(\n loop_cnt: int, state: Tuple[jnp.ndarray, jnp.ndarray,\n Mapping[str, jnp.ndarray],\n Optional[FlaxMutables]]\n ) -> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, jnp.ndarray],\ndef apply_grads(\n train_state: train_state_lib.TrainState,\n grad_accum: ModelWeights,\n metrics: MutableMetricMapType,\n learning_rate: jnp.ndarray,\n weight_metrics_computer: Optional[WeightMetricsComputer],\n other_state_variables: Optional[Mapping[str, Any]] = None\n) 
-> Tuple[train_state_lib.TrainState, MetricMapType]:\ndef eval_step(model: models.BaseModel, train_state: train_state_lib.TrainState,\n batch: jnp.ndarray) -> MetricMapType:\ndef train_with_lr(\n train_state: train_state_lib.TrainState,\n batch: BatchType,\n learning_rate: jnp.ndarray,\n dropout_rng: Rng,\n model: models.BaseModel,\n num_microbatches: Optional[int],\n weight_metrics_computer: Optional[WeightMetricsComputer] = None,\n data_partition_spec: PartitionSpec = PartitionSpec(\"data\"),\n loss_fn_args=None\n):\n def __call__(self, model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str], summary_dir: Optional[str],\n train_state_axes: Any, rng: Rng) -> BaseTrainer:\n def __init__(self,\n model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str],\n summary_dir: Optional[str],\n train_state_axes: Any,\n rng: Rng,\n learning_rate_fn: LearningRateCallable,\n num_microbatches: Optional[int],\n weight_metrics_computer: Optional[WeightMetricsComputer] = None,\n use_wandb=True,\n packing_strategy=None,\n log_weights=False\n ):\n def _partitioned_train_step(self) -> PartitionedTrainCallable:\n def train_step(train_state: train_state_lib.TrainState, batch: BatchType, static_args=None):\n def _partitioned_eval_step(self) -> PartitionedEvalCallable:\ndef _warn_action_not_run(action, task, metric):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\n def __init__(self,\n metric: Tuple[str, str],\n mode: str,\n patience: int = 3,\n atol: float = 0.,\n rtol: float = 0.):\n def _compare_fn(self, current, previous):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\n def __init__(self, task: str, metric: str = \"loss\"):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\nclass ArrayMapFuture(typing_extensions.Protocol):\nclass MetricValueMapFuture(typing_extensions.Protocol):\nclass TimeFuture(typing_extensions.Protocol):\nclass LearningRateCallable(typing_extensions.Protocol):\nclass SummarizeMetricsCallable(typing_extensions.Protocol):\nclass PartitionedTrainCallable(typing_extensions.Protocol):\nclass PartitionedEvalCallable(typing_extensions.Protocol):\nclass GradNormComputer(object):\nclass WeightMetricsComputer(object):\nclass _AsyncTimer(object):\nclass MetricsManager(object):\nclass PreemptionError(Exception):\nclass BaseTrainer(abc.ABC):\nclass BaseTrainerConstructor(Protocol):\nclass Trainer(BaseTrainer):\nclass ActionMode(enum.Enum):\nclass BaseAction(abc.ABC):\nclass EarlyStoppingAction(BaseAction):\nclass TerminateOnNanAction(BaseAction):\n _WEIGHT_METRICS = [\n \"weight_rms\", \"weight_gradient_rms\", \"weight_update_rms\", \"weight_max\"\n ]\n TRAIN = 1\n TRAIN_EVAL = 2\n INFER_EVAL = 3" }, { "identifier": "utils", "path": "t5x/utils.py", "snippet": "class EvaluatorConstructor(typing_extensions.Protocol):\nclass SaveCheckpointConfig:\nclass RestoreCheckpointConfig:\nclass CheckpointConfig:\nclass LegacyCheckpointer(orbax.checkpoint.Checkpointer):\nclass LegacyCheckpointManager(orbax.checkpoint.CheckpointManager):\nclass DatasetConfig:\nclass GDADatasetIterator(clu.data.dataset_iterator.DatasetIterator):\nclass InitFnCallable(typing_extensions.Protocol):\nclass LearningRateCallable(typing_extensions.Protocol):\nclass 
TrainStateInitializer:\nclass InferStepWithRngCallable(typing_extensions.Protocol):\nclass InferStepWithoutRngCallable(typing_extensions.Protocol):\nclass InferFnCallable(typing_extensions.Protocol):\nclass GetDatasetCallable(typing_extensions.Protocol):\nclass GetEvalDatasetCallable(typing_extensions.Protocol):\nclass _RegexMap(collections.abc.Mapping):\n def __call__(\n self,\n mixture_or_task_name: str,\n feature_converter: seqio.FeatureConverter,\n eval_split: str,\n use_cached: bool,\n seed: Optional[int],\n sequence_length: Optional[Mapping[str, int]],\n log_dir: Optional[str],\n use_memory_cache: bool,\n ) -> seqio.Evaluator:\n def __post_init__(self):\n def __post_init__(self):\n def __init__(self,\n *,\n save_checkpointer: Optional[checkpoints.Checkpointer] = None,\n restore_checkpointer: checkpoints.Checkpointer,\n strict: Optional[bool] = False):\n async def async_save(self, path: str, item: Any):\n async def async_restore(self, path: str, item: Optional[Any] = None) -> Any:\n def save(self,\n path: str,\n item: train_state_lib.TrainState,\n state_transformation_fns: Sequence[\n checkpoints.SaveStateTransformationFn] = (),\n *,\n concurrent_gb: int = 128):\n def restore(self,\n path: str,\n item: Optional[train_state_lib.TrainState],\n state_transformation_fns: Sequence[\n checkpoints.RestoreStateTransformationFn] = (),\n fallback_state: Optional[Mapping[str, Any]] = None,\n lazy_parameters: bool = False) -> train_state_lib.TrainState:\n def __init__(self,\n *,\n save_cfg: Optional[SaveCheckpointConfig] = None,\n restore_cfg: RestoreCheckpointConfig,\n train_state_shape: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n ds_iter: Optional[\n Union[tf.data.Iterator,\n clu.data.dataset_iterator.DatasetIterator]] = None,\n model_dir: Optional[str] = None,\n use_gda: Optional[bool] = True):\n def save(self,\n train_state: train_state_lib.TrainState,\n state_transformation_fns: Sequence[\n checkpoints.SaveStateTransformationFn] = ()):\n def restore(\n self,\n paths: Sequence[str],\n restore_cfg: RestoreCheckpointConfig,\n fallback_state: Optional[Mapping[str, Any]] = None\n ) -> Union[train_state_lib.TrainState, Sequence[train_state_lib.TrainState]]:\ndef _get_index_mappings(device_to_idxs):\ndef _create_gda(partitioner: partitioning.BasePartitioner,\n global_shapes: PyTreeDef, host_arrays: PyTreeDef) -> PyTreeDef:\n def _put_to_devices(x, global_shape):\n def _gda(dbs, global_shape):\n def __init__(self, iterator: clu.data.dataset_iterator.DatasetIterator,\n partitioner: partitioning.BasePartitioner,\n global_shapes: PyTreeDef):\n def __next__(self):\n def reset(self):\n def element_spec(self):\n def save(self, filename):\n def restore(self, filename):\n def iterator(self):\ndef sync_global_devices(name: str) -> None:\ndef multihost_assert_equal(input_tree, fail_message: str = ''):\ndef _hardware_uniform(\n rng_key: Array,\n shape: Shape,\n dtype: jnp.dtype = np.float32,\n minval: Array = np.float32(0),\n maxval: Array = np.float32(1)\n) -> Array:\ndef _hardware_bernoulli(\n rng_key: Array, p: np.ndarray = np.float32(0.5),\n shape: Shape = ()) -> Array:\ndef set_hardware_rng_ops():\ndef get_zeros_batch_like_spec(\n batch_spec: Mapping[str,\n jax.ShapeDtypeStruct]) -> Mapping[str, jnp.ndarray]:\ndef get_zeros_batch_like_dataset(dataset: tf.data.Dataset,\n batch_size=None) -> Mapping[str, jnp.ndarray]:\n def __call__(\n self, rng: Array, input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str,\n DType]]) -> flax_scope.FrozenVariableDict:\n def 
__call__(self, step: jnp.ndarray) -> jnp.ndarray:\ndef create_learning_rate_scheduler(\n factors: str = 'constant * linear_warmup * rsqrt_decay',\n base_learning_rate: float = 0.5,\n warmup_steps: int = 1000,\n decay_factor: float = 0.5,\n steps_per_decay: int = 20000,\n steps_per_cycle: int = 100000,\n step_offset: int = 0,\n min_learning_rate: float = 1e-8) -> LearningRateCallable:\n def step_fn(step: jnp.ndarray) -> jnp.ndarray:\ndef steps(prefix, config, data_size=None, batch_size=None, default=ValueError):\ndef create_vision_learning_rate_scheduler(\n total_steps, batch_size=None, data_size=None,\n base=1.0, decay_type=\"stair\",\n scale_with_batchsize=False, **kw):\n def step_fn(step):\ndef get_first_valid_restore_config_and_paths(\n restore_cfgs: Sequence[RestoreCheckpointConfig]\n) -> Tuple[Optional[RestoreCheckpointConfig], Sequence[str]]:\ndef get_fallback_state(restore_cfg: RestoreCheckpointConfig,\n init_fn: Callable[[jnp.ndarray], Mapping[str, Any]],\n init_rng: jnp.ndarray) -> Optional[Mapping[str, Any]]:\n def __init__(self,\n optimizer_def: Optional[optimizers.OptimizerDefType],\n init_fn: InitFnCallable,\n input_shapes: Mapping[str, Array],\n partitioner: partitioning.BasePartitioner,\n model=None,\n input_types: Optional[Mapping[str, DType]] = None):\n def initialize_train_state(rng: Array):\n def from_scratch(self, init_rng: Array) -> train_state_lib.TrainState:\n def from_checkpoints(\n self,\n restore_cfgs: Sequence[RestoreCheckpointConfig],\n ds_iter: Optional[tf.data.Iterator] = None,\n init_rng: Optional[jnp.ndarray] = None,\n ) -> Iterable[train_state_lib.TrainState]:\n def _restore_path(path, cfg):\n def from_checkpoint(\n self,\n ckpt_cfgs: Sequence[RestoreCheckpointConfig],\n *,\n ds_iter: Optional[tf.data.Iterator] = None,\n init_rng: Optional[jnp.ndarray] = None\n ) -> Optional[train_state_lib.TrainState]:\n def from_checkpoint_or_scratch(\n self,\n ckpt_cfgs: Sequence[RestoreCheckpointConfig],\n *,\n init_rng: Array,\n ds_iter: Optional[tf.data.Iterator] = None) -> train_state_lib.TrainState:\ndef log_model_info(log_file: Optional[str],\n full_train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner):\n def _log_info_and_write_to_file(writer, format_str, *args):\n def _log_variable(name: str, arr: Optional[np.ndarray],\n logical_axes: Optional[partitioning.AxisNames],\n mesh_axes: Optional[partitioning.PartitionSpec]):\n def __call__(self,\n params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray],\n rng: jnp.ndarray = None) -> PyTreeDef:\n def __call__(self, params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray]) -> PyTreeDef:\n def __call__(\n self,\n ds: tf.data.Dataset,\n train_state: train_state_lib.TrainState,\n rng: Optional[jnp.ndarray] = None\n ) -> Union[_InferFnResult, _InferFnWithAuxResult]:\ndef _remove_padding(all_inferences, all_indices):\ndef get_infer_fn(infer_step: InferStepCallable, batch_size: int,\n train_state_axes: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner, \n pbar=False) -> InferFnCallable:\n def infer_step_with_indices(params, batch, rng, indices):\n def infer_fn(ds: tf.data.Dataset,\n train_state: train_state_lib.TrainState,\n rng: Optional[jnp.ndarray] = None):\n def _copy_to_host_async(x):\ndef import_module(module: str):\ndef get_vocabulary(\n cfg: DatasetConfig) -> Tuple[seqio.Vocabulary, seqio.Vocabulary]:\ndef verify_matching_vocabs(cfg: DatasetConfig, model: Any):\ndef get_dataset(cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n 
feature_converter_cls: Callable[..., seqio.FeatureConverter],\n num_epochs: Optional[int] = None,\n continue_from_last_checkpoint: bool = False,\n batching_fn=None) -> tf.data.Dataset:\ndef get_dataset_inner(cfg: DatasetConfig,\n shard_info: seqio.ShardInfo,\n feature_converter_cls: Callable[...,\n seqio.FeatureConverter],\n seed: Optional[int] = None,\n num_epochs: Optional[int] = None,\n batching_fn=None\n ):\n def __call__(\n self,\n cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter],\n num_epochs: Optional[int] = None,\n continue_from_last_checkpoint: bool = True\n ) -> Union[clu.data.dataset_iterator.DatasetIterator, tf.data.Dataset]:\n def __call__(\n self, cfg: DatasetConfig, shard_id: int, num_shards: int, eval_steps: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter]\n ) -> Mapping[str, tf.data.Dataset]:\ndef get_training_eval_datasets(\n cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n eval_steps: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter],\n deterministic: bool = False,\n model_dir: Optional[str] = None,\n start_step: int = 0,\n) -> Mapping[str, tf.data.Dataset]:\n def _repeat_shard_batch_take_cache(ds: tf.data.Dataset):\ndef round_vocab_size_to_multiple(vocabulary: seqio.Vocabulary,\n divisor: int = 128):\ndef flatten_dict_string_keys(x):\ndef flatten_lists(lsts: Iterable[Iterable]) -> Sequence:\n def __init__(self, kvs: Sequence[Tuple[str, Any]]):\n def __getitem__(self, key: str) -> Any:\n def __len__(self) -> int:\n def __iter__(self) -> Iterable[Tuple[re.Pattern, Any]]:\ndef override_params_axes_names(\n model_variables: flax_scope.FrozenVariableDict,\n params_axes_names_override: Sequence[Tuple[str, Tuple[str, ...]]] = ()\n) -> flax_scope.FrozenVariableDict:\ndef get_local_data(x):" } ]
import functools
import flax
import jax
import jax.numpy as jnp
import numpy as np
import t5.data.tasks  # pylint:disable=unused-import
import tensorflow as tf
from unittest import mock
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
from flax import traverse_util
from t5x import decoding
from t5x import models
from t5x import partitioning
from t5x import test_utils
from t5x import trainer as trainer_lib
from t5x import utils
12,466
shapes['encoder_segment_ids'], dtype=types['encoder_segment_ids']) np.testing.assert_allclose(called_with[1]['encoder_segment_ids'], encoder_positions) else: self.assertIsNone(called_with[1]['encoder_segment_ids']) if 'decoder_segment_ids' in shapes: decoder_segment_ids = jnp.ones( shapes['decoder_segment_ids'], dtype=types['decoder_segment_ids']) np.testing.assert_allclose(called_with[1]['decoder_segment_ids'], decoder_segment_ids) else: self.assertIsNone(called_with[1]['decoder_segment_ids']) self.assertFalse(called_with[1]['decode']) self.assertFalse(called_with[1]['enable_dropout']) @parameterized.named_parameters( dict(testcase_name='no_force_decoding', prompt_with_targets=False), dict(testcase_name='force_decoding', prompt_with_targets=True), ) def test_prompt_with_targets(self, prompt_with_targets): batch_size, encoder_len, max_decode_len, emb_dim = 2, 3, 4, 5 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.full([batch_size, max_decode_len], 2, dtype=np.int32) } # These dummy logits represent the probability distribution where all the # probability mass is in one item (i.e., degenerate distribution). For # batch element 0, it is vocabulary index 3. # We test `_predict_step` to avoid having to define a task and its # vocabulary. dummy_logits = jnp.expand_dims( jnp.array([[-1e7, -1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, -1e7, 0]]), axis=1) mock_decode_fn = mock.Mock() mock_decode_fn.return_value = (np.full([batch_size, max_decode_len, 1], 3, dtype=np.int32), np.full([batch_size, 1], 1.0, dtype=np.float32)) class MockModule: def __init__(self): self.dtype = jnp.float32 def apply(self, *args, method=None, **kwargs): del args, kwargs if method is None: # use for module.`__call__` return (dummy_logits, {'cache': {}}) else: return method() def encode(self): return jnp.zeros((batch_size, encoder_len, emb_dim)) def decode(self): return (dummy_logits, {'cache': {}}) def mock_init(self): self.module = MockModule() self.module.scan_layers = False self._input_vocabulary = mock.Mock(eos_id=1) self._output_vocabulary = mock.Mock(eos_id=1) self._decode_fn = mock_decode_fn with mock.patch.object( models.EncoderDecoderModel, '__init__', new=mock_init): model = models.EncoderDecoderModel() model.predict_batch_with_aux({}, batch, prompt_with_targets=prompt_with_targets) if prompt_with_targets: expected_inputs = batch['decoder_input_tokens'] else: expected_inputs = np.zeros([batch_size, max_decode_len], dtype=np.int32) assert mock_decode_fn.call_count == 1 # Look at the kwargs call list for inputs, assert_called_with doesn't # work well with np.array comparison. np.testing.assert_array_equal(mock_decode_fn.mock_calls[0][2]['inputs'], expected_inputs) def test_predict_batch_loop_and_caches_are_equal(self): vocab_size = 50 lengths = np.array([[2], [3]]) batch_size, beam_size, encoder_len, max_decode_len = 2, 2, 3, 7 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_target_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.concatenate( [ np.expand_dims( np.concatenate( [[0], np.arange(9, 9 + lengths[0][0], dtype=np.int32), np.zeros((max_decode_len - lengths[0][0] - 1), dtype=np.int32)]), axis=0), # First element np.expand_dims( np.concatenate( [[0], np.arange(3, 3 + lengths[1][0], dtype=np.int32), np.zeros((max_decode_len - lengths[1][0] - 1), dtype=np.int32)]), axis=0) # Second element ], axis=0), }
# Copyright 2022 The T5X Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for t5x.models.""" # Parse absl flags test_srcdir and test_tmpdir. jax.config.parse_flags_with_absl() PartitionSpec = partitioning.PartitionSpec class ModelsTest(parameterized.TestCase): def test_remove_prefix(self): sequences = np.array([[1, 2, 3, 4, 5, 6, 7, 0], [6, 7, 8, 9, 10, 11, 0, 0]]) prefix_lengths = np.array([2, 4]) expected = [[3, 4, 5, 6, 7, 0, 0, 0], [10, 11, 0, 0, 0, 0, 0, 0]] remove_prefix = jax.jit(models.remove_prefix) actual = remove_prefix(sequences, prefix_lengths) np.testing.assert_array_equal(actual, expected) def test_remove_prefix_zero_len_prefix(self): sequences = np.array([[1, 2, 3, 4, 5, 6, 7, 0], [6, 7, 8, 9, 10, 11, 0, 0]]) prefix_lengths = np.array([0, 0]) remove_prefix = jax.jit(models.remove_prefix) actual = remove_prefix(sequences, prefix_lengths) # The expected output is the original sequences. np.testing.assert_array_equal(actual, sequences) BATCH_SIZE, ENCODER_LEN, MAX_DECODE_LEN, EMBED_DIM = 2, 3, 4, 5 class EncoderDecoderModelTest(parameterized.TestCase): @parameterized.named_parameters( dict( testcase_name='no_types', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62] }, types=None), dict( testcase_name='int32', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62] }, types={ 'encoder_input_tokens': jnp.int32, 'decoder_input_tokens': jnp.int32 }), dict( testcase_name='float32', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62], 'encoder_positions': [1, 512], 'decoder_positions': [1, 62], }, types={ 'encoder_input_tokens': jnp.int32, 'decoder_input_tokens': jnp.int32, 'encoder_positions': jnp.int32, 'decoder_positions': jnp.int32 }), dict( testcase_name='float32_segment_ids', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62], 'encoder_segment_ids': [1, 512], 'decoder_segment_ids': [1, 62], }, types={ 'encoder_input_tokens': jnp.int32, 'decoder_input_tokens': jnp.int32, 'encoder_segment_ids': jnp.int32, 'decoder_segment_ids': jnp.int32 }), ) def test_get_initial_variables_shapes_and_types(self, shapes, types): mock_transformer = mock.Mock() mock_transformer.init.return_value = {'params': {}} mock_optimizer_def = mock.Mock() rng = mock.Mock() def mock_init(self): self.module = mock_transformer self.optimizer_def = mock_optimizer_def with mock.patch.object( models.EncoderDecoderModel, '__init__', new=mock_init): model = models.EncoderDecoderModel() model.get_initial_variables(rng, shapes, types) if types is None: encoder_input = jnp.ones( shapes['encoder_input_tokens'], dtype=jnp.float32) decoder_input = jnp.ones( shapes['decoder_input_tokens'], dtype=jnp.float32) else: encoder_input = jnp.ones( shapes['encoder_input_tokens'], dtype=types['encoder_input_tokens']) decoder_input = jnp.ones( shapes['decoder_input_tokens'], dtype=types['decoder_input_tokens']) # Using `.assert_called_once_with` doesn't work because the simple # comparison it does for the array arguments fail (truth value of 
an array # is ambiguous). called_with = mock_transformer.init.call_args self.assertEqual(called_with[0][0], rng) np.testing.assert_allclose(called_with[0][1], encoder_input) np.testing.assert_allclose(called_with[0][2], decoder_input) np.testing.assert_allclose(called_with[0][3], decoder_input) if 'encoder_positions' in shapes: encoder_positions = jnp.ones( shapes['encoder_positions'], dtype=types['encoder_positions']) np.testing.assert_allclose(called_with[1]['encoder_positions'], encoder_positions) else: self.assertIsNone(called_with[1]['encoder_positions']) if 'decoder_positions' in shapes: decoder_positions = jnp.ones( shapes['decoder_positions'], dtype=types['decoder_positions']) np.testing.assert_allclose(called_with[1]['decoder_positions'], decoder_positions) else: self.assertIsNone(called_with[1]['decoder_positions']) if 'encoder_segment_ids' in shapes: encoder_positions = jnp.ones( shapes['encoder_segment_ids'], dtype=types['encoder_segment_ids']) np.testing.assert_allclose(called_with[1]['encoder_segment_ids'], encoder_positions) else: self.assertIsNone(called_with[1]['encoder_segment_ids']) if 'decoder_segment_ids' in shapes: decoder_segment_ids = jnp.ones( shapes['decoder_segment_ids'], dtype=types['decoder_segment_ids']) np.testing.assert_allclose(called_with[1]['decoder_segment_ids'], decoder_segment_ids) else: self.assertIsNone(called_with[1]['decoder_segment_ids']) self.assertFalse(called_with[1]['decode']) self.assertFalse(called_with[1]['enable_dropout']) @parameterized.named_parameters( dict(testcase_name='no_force_decoding', prompt_with_targets=False), dict(testcase_name='force_decoding', prompt_with_targets=True), ) def test_prompt_with_targets(self, prompt_with_targets): batch_size, encoder_len, max_decode_len, emb_dim = 2, 3, 4, 5 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.full([batch_size, max_decode_len], 2, dtype=np.int32) } # These dummy logits represent the probability distribution where all the # probability mass is in one item (i.e., degenerate distribution). For # batch element 0, it is vocabulary index 3. # We test `_predict_step` to avoid having to define a task and its # vocabulary. 
dummy_logits = jnp.expand_dims( jnp.array([[-1e7, -1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, -1e7, 0]]), axis=1) mock_decode_fn = mock.Mock() mock_decode_fn.return_value = (np.full([batch_size, max_decode_len, 1], 3, dtype=np.int32), np.full([batch_size, 1], 1.0, dtype=np.float32)) class MockModule: def __init__(self): self.dtype = jnp.float32 def apply(self, *args, method=None, **kwargs): del args, kwargs if method is None: # use for module.`__call__` return (dummy_logits, {'cache': {}}) else: return method() def encode(self): return jnp.zeros((batch_size, encoder_len, emb_dim)) def decode(self): return (dummy_logits, {'cache': {}}) def mock_init(self): self.module = MockModule() self.module.scan_layers = False self._input_vocabulary = mock.Mock(eos_id=1) self._output_vocabulary = mock.Mock(eos_id=1) self._decode_fn = mock_decode_fn with mock.patch.object( models.EncoderDecoderModel, '__init__', new=mock_init): model = models.EncoderDecoderModel() model.predict_batch_with_aux({}, batch, prompt_with_targets=prompt_with_targets) if prompt_with_targets: expected_inputs = batch['decoder_input_tokens'] else: expected_inputs = np.zeros([batch_size, max_decode_len], dtype=np.int32) assert mock_decode_fn.call_count == 1 # Look at the kwargs call list for inputs, assert_called_with doesn't # work well with np.array comparison. np.testing.assert_array_equal(mock_decode_fn.mock_calls[0][2]['inputs'], expected_inputs) def test_predict_batch_loop_and_caches_are_equal(self): vocab_size = 50 lengths = np.array([[2], [3]]) batch_size, beam_size, encoder_len, max_decode_len = 2, 2, 3, 7 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_target_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.concatenate( [ np.expand_dims( np.concatenate( [[0], np.arange(9, 9 + lengths[0][0], dtype=np.int32), np.zeros((max_decode_len - lengths[0][0] - 1), dtype=np.int32)]), axis=0), # First element np.expand_dims( np.concatenate( [[0], np.arange(3, 3 + lengths[1][0], dtype=np.int32), np.zeros((max_decode_len - lengths[1][0] - 1), dtype=np.int32)]), axis=0) # Second element ], axis=0), }
model = test_utils.get_t5_test_model(vocab_size=50)
3
2023-12-12 20:23:33+00:00
16k
zju3dv/EasyVolcap
easyvolcap/utils/gl_utils.py
[ { "identifier": "dotdict", "path": "easyvolcap/utils/base_utils.py", "snippet": "class dotdict(dict, Dict[KT, VT]):\n \"\"\"\n This is the default data passing object used throughout the codebase\n Main function: dot access for dict values & dict like merging and updates\n\n a dictionary that supports dot notation \n as well as dictionary access notation \n usage: d = make_dotdict() or d = make_dotdict{'val1':'first'})\n set attributes: d.val2 = 'second' or d['val2'] = 'second'\n get attributes: d.val2 or d['val2']\n \"\"\"\n\n def update(self, dct: Dict = None, **kwargs):\n dct = copy(dct) # avoid modifying the original dict, use super's copy to avoid recursion\n\n # Handle different arguments\n if dct is None:\n dct = kwargs\n elif isinstance(dct, Mapping):\n dct.update(kwargs)\n else:\n super().update(dct, **kwargs)\n return\n\n # Recursive updates\n for k, v in dct.items():\n if k in self:\n\n # Handle type conversions\n target_type = type(self[k])\n if not isinstance(v, target_type):\n # NOTE: bool('False') will be True\n if target_type == bool and isinstance(v, str):\n dct[k] = v == 'True'\n else:\n dct[k] = target_type(v)\n\n if isinstance(v, dict):\n self[k].update(v) # recursion from here\n else:\n self[k] = v\n else:\n if isinstance(v, dict):\n self[k] = dotdict(v) # recursion?\n else:\n self[k] = v\n return self\n\n def __init__(self, *args, **kwargs):\n self.update(*args, **kwargs)\n\n copy = return_dotdict(dict.copy)\n fromkeys = return_dotdict(dict.fromkeys)\n\n # def __hash__(self):\n # # return hash(''.join([str(self.values().__hash__())]))\n # return super(dotdict, self).__hash__()\n\n # def __init__(self, *args, **kwargs):\n # super(dotdict, self).__init__(*args, **kwargs)\n\n \"\"\"\n Uncomment following lines and \n comment out __getattr__ = dict.__getitem__ to get feature:\n \n returns empty numpy array for undefined keys, so that you can easily copy things around\n TODO: potential caveat, harder to trace where this is set to np.array([], dtype=np.float32)\n \"\"\"\n\n def __getitem__(self, key):\n try:\n return dict.__getitem__(self, key)\n except KeyError as e:\n raise AttributeError(e)\n # MARK: Might encounter exception in newer version of pytorch\n # Traceback (most recent call last):\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/queues.py\", line 245, in _feed\n # obj = _ForkingPickler.dumps(obj)\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/reduction.py\", line 51, in dumps\n # cls(buf, protocol).dump(obj)\n # KeyError: '__getstate__'\n # MARK: Because you allow your __getattr__() implementation to raise the wrong kind of exception.\n # FIXME: not working typing hinting code\n __getattr__: Callable[..., 'torch.Tensor'] = __getitem__ # type: ignore # overidden dict.__getitem__\n __getattribute__: Callable[..., 'torch.Tensor'] # type: ignore\n # __getattr__ = dict.__getitem__\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n # TODO: better ways to programmically define these special variables?\n\n @property\n def meta(self) -> dotdict:\n # Special variable used for storing cpu tensor in batch\n if 'meta' not in self:\n self.meta = dotdict()\n return self.__getitem__('meta')\n\n @meta.setter\n def meta(self, meta):\n self.__setitem__('meta', meta)\n\n @property\n def output(self) -> dotdict: # late annotation needed for this\n # Special entry for storing output tensor in batch\n if 'output' not in self:\n self.output = dotdict()\n return self.__getitem__('output')\n\n @output.setter\n 
def output(self, output):\n self.__setitem__('output', output)\n\n @property\n def persistent(self) -> dotdict: # late annotation needed for this\n # Special entry for storing persistent tensor in batch\n if 'persistent' not in self:\n self.persistent = dotdict()\n return self.__getitem__('persistent')\n\n @persistent.setter\n def persistent(self, persistent):\n self.__setitem__('persistent', persistent)\n\n @property\n def type(self) -> str: # late annotation needed for this\n # Special entry for type based construction system\n return self.__getitem__('type')\n\n @type.setter\n def type(self, type):\n self.__setitem__('type', type)\n\n def to_dict(self):\n out = dict()\n for k, v in self.items():\n if isinstance(v, dotdict):\n v = v.to_dict() # recursion point\n out[k] = v\n return out" }, { "identifier": "Camera", "path": "easyvolcap/utils/viewer_utils.py", "snippet": "class Camera:\n # Helper class to manage camera parameters\n def __init__(self,\n H: int = 512,\n W: int = 512,\n K: torch.Tensor = torch.tensor([[512.0, 0.0, 256], [0.0, 512.0, 256.0], [0.0, 0.0, 1.0]]), # intrinsics\n R: torch.Tensor = torch.tensor([[-1.0, 0.0, 0.0,], [0.0, 0.0, -1.0,], [0.0, -1.0, 0.0,]]), # extrinsics\n T: torch.Tensor = torch.tensor([[0.0], [0.0], [-3.0],]), # extrinsics\n n: float = 0.002, # bounds limit\n f: float = 100, # bounds limit\n t: float = 0.0, # temporal dimension (implemented as a float instead of int)\n v: float = 0.0, # view dimension (implemented as a float instead of int)\n bounds: torch.Tensor = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]]), # bounding box\n\n # camera update hyperparameters\n origin: torch.Tensor = torch.tensor([0.0, 0.0, 0.0]),\n world_up: torch.Tensor = torch.tensor([0.0, 0.0, 1.0]),\n movement_speed: float = 1.0, # gui movement speed\n\n batch: dotdict = None, # will ignore all other inputs\n string: str = None, # will ignore all other inputs\n **kwargs,\n ) -> None:\n\n # Batch (network input parameters)\n if string is None:\n if batch is None:\n batch = dotdict()\n batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds = H, W, K, R, T, n, f, t, v, bounds\n self.from_batch(batch)\n \n # Other configurables\n self.origin = vec3(*origin)\n self.world_up = vec3(*world_up)\n self.movement_speed = movement_speed\n # self.front = self.front # will trigger an update\n else:\n self.from_string(string)\n\n # Internal states to facilitate camera position change\n self.is_dragging = False # rotation\n self.about_origin = False # about origin rotation\n self.is_panning = False # translation\n self.lock_fx_fy = True\n\n @property\n def w2p(self):\n ixt = mat4(self.ixt)\n ixt[3, 3] = 0\n ixt[2, 3] = 1\n return ixt @ self.ext # w2c -> c2p = w2p\n\n @property\n def V(self): return self.c2w\n\n @property\n def ixt(self): return self.K\n\n @property\n def gl_ext(self):\n gl_c2w = self.c2w\n gl_c2w[0] *= 1 # flip x\n gl_c2w[1] *= -1 # flip y\n gl_c2w[2] *= -1 # flip z\n gl_ext = glm.affineInverse(gl_c2w)\n return gl_ext # use original opencv ext since we've taken care of the intrinsics in gl_ixt\n\n @property\n def gl_ixt(self):\n # Construct opengl camera matrix with projection & clipping\n # https://fruty.io/2019/08/29/augmented-reality-with-opencv-and-opengl-the-tricky-projection-matrix/\n # https://gist.github.com/davegreenwood/3a32d779f81f08dce32f3bb423672191\n # fmt: off\n gl_ixt = mat4(\n 2 * self.fx / self.W, 0, 0, 0,\n 2 * self.s / self.W, 2 * self.fy / self.H, 0, 0,\n 1 - 2 * (self.cx / self.W), 2 * (self.cy / self.H) - 1, 
(self.f + self.n) / (self.n - self.f), -1,\n 0, 0, 2 * self.f * self.n / (self.n - self.f), 0,\n )\n # fmt: on\n\n return gl_ixt\n\n @property\n def ext(self): return self.w2c\n\n @property\n def w2c(self):\n w2c = mat4(self.R)\n w2c[3] = vec4(*self.T, 1.0)\n return w2c\n\n @property\n def c2w(self):\n return glm.affineInverse(self.w2c)\n\n @property\n def right(self) -> vec3: return vec3(self.R[0, 0], self.R[1, 0], self.R[2, 0]) # c2w R, 0 -> 3,\n\n @property\n def down(self) -> vec3: return vec3(self.R[0, 1], self.R[1, 1], self.R[2, 1]) # c2w R, 1 -> 3,\n\n @property\n def front(self) -> vec3: return vec3(self.R[0, 2], self.R[1, 2], self.R[2, 2]) # c2w R, 2 -> 3,\n\n @front.setter\n def front(self, v: vec3):\n front = v # the last row of R\n self.R[0, 2], self.R[1, 2], self.R[2, 2] = front.x, front.y, front.z\n right = glm.normalize(glm.cross(self.front, self.world_up)) # right\n self.R[0, 0], self.R[1, 0], self.R[2, 0] = right.x, right.y, right.z\n down = glm.cross(self.front, self.right) # down\n self.R[0, 1], self.R[1, 1], self.R[2, 1] = down.x, down.y, down.z\n\n @property\n def center(self): return -glm.transpose(self.R) @ self.T # 3,\n\n @center.setter\n def center(self, v: vec3):\n self.T = -self.R @ v # 3, 1\n\n @property\n def s(self): return self.K[1, 0]\n\n @s.setter\n def s(self, s): self.K[1, 0] = s\n\n @property\n def fx(self): return self.K[0, 0]\n\n @fx.setter\n def fx(self, v: float):\n v = min(v, 1e5)\n v = max(v, 1e-3)\n if self.lock_fx_fy:\n self.K[1, 1] = v / self.K[0, 0] * self.K[1, 1]\n self.K[0, 0] = v\n\n @property\n def fy(self): return self.K[1, 1]\n\n @fy.setter\n def fy(self, v: float):\n if self.lock_fx_fy:\n self.K[0, 0] = v / self.K[1, 1] * self.K[0, 0]\n self.K[1, 1] = v\n\n @property\n def cx(self): return self.K[2, 0]\n\n @cx.setter\n def cx(self, v: float):\n self.K[2, 0] = v\n\n @property\n def cy(self): return self.K[2, 1]\n\n @cy.setter\n def cy(self, v: float):\n self.K[2, 1] = v\n\n def begin_dragging(self,\n x: float, y: float,\n is_panning: bool,\n about_origin: bool,\n ):\n self.is_dragging = True\n self.is_panning = is_panning\n self.about_origin = about_origin\n self.drag_start = vec2([x, y])\n\n # Record internal states # ? 
Will this make a copy?\n self.drag_start_front = self.front # a recording\n self.drag_start_down = self.down\n self.drag_start_right = self.right\n self.drag_start_center = self.center\n self.drag_start_origin = self.origin\n self.drag_start_world_up = self.world_up\n\n # Need to find the max or min delta y to align with world_up\n dot = glm.dot(self.world_up, self.drag_start_front)\n self.drag_ymin = -np.arccos(-dot) + 0.01 # drag up, look down\n self.drag_ymax = np.pi + self.drag_ymin - 0.02 # remove the 0.01 of drag_ymin\n\n def end_dragging(self):\n self.is_dragging = False\n\n def update_dragging(self, x: float, y: float):\n if not self.is_dragging:\n return\n\n current = vec2(x, y)\n delta = current - self.drag_start\n delta /= max(self.H, self.W)\n delta *= -1\n\n if self.is_panning:\n delta *= self.movement_speed\n center_delta = delta[0] * self.drag_start_right + delta[1] * self.drag_start_down\n self.center = self.drag_start_center + center_delta\n if self.about_origin:\n self.origin = self.drag_start_origin + center_delta\n else:\n m = mat4(1.0)\n m = glm.rotate(m, delta.x % 2 * np.pi, self.world_up)\n m = glm.rotate(m, np.clip(delta.y, self.drag_ymin, self.drag_ymax), self.drag_start_right)\n self.front = m @ self.drag_start_front # might overshoot\n\n if self.about_origin:\n self.center = -m @ (self.origin - self.drag_start_center) + self.origin\n\n def move(self, x_offset: float, y_offset: float):\n speed_factor = 1e-1\n movement = y_offset * speed_factor\n movement = movement * self.front * self.movement_speed\n self.center += movement\n\n if self.is_dragging:\n self.drag_start_center += movement\n\n def to_batch(self):\n meta = dotdict()\n meta.H = torch.as_tensor(self.H)\n meta.W = torch.as_tensor(self.W)\n meta.K = torch.as_tensor(self.K.to_list()).mT\n meta.R = torch.as_tensor(self.R.to_list()).mT\n meta.T = torch.as_tensor(self.T.to_list())[..., None]\n meta.n = torch.as_tensor(self.n)\n meta.f = torch.as_tensor(self.f)\n meta.t = torch.as_tensor(self.t)\n meta.v = torch.as_tensor(self.v)\n meta.bounds = torch.as_tensor(self.bounds.to_list()) # no transpose for bounds\n\n # GUI related elements\n meta.movement_speed = torch.as_tensor(self.movement_speed)\n meta.origin = torch.as_tensor(self.origin.to_list())\n meta.world_up = torch.as_tensor(self.world_up.to_list())\n\n batch = dotdict()\n batch.update(meta)\n batch.meta.update(meta)\n return batch\n\n def to_easymocap(self):\n batch = self.to_batch()\n camera = to_numpy(batch)\n return camera\n\n def from_easymocap(self, camera: dict):\n batch = to_tensor(camera)\n self.from_batch(batch)\n return self\n\n def to_string(self) -> str:\n batch = to_list(self.to_batch().meta)\n return json.dumps(batch)\n\n def from_string(self, string: str):\n batch = to_tensor(dotdict(json.loads(string)), ignore_list=True)\n self.from_batch(batch)\n\n def from_batch(self, batch: dotdict):\n H, W, K, R, T, n, f, t, v, bounds = batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds\n\n # Batch (network input parameters)\n self.H = int(H)\n self.W = int(W)\n self.K = mat3(*K.mT.ravel())\n self.R = mat3(*R.mT.ravel())\n self.T = vec3(*T.ravel()) # 3,\n self.n = float(n)\n self.f = float(f)\n self.t = float(t)\n self.v = float(v)\n self.bounds = mat2x3(*bounds.ravel()) # 2, 3\n\n if 'movement_speed' in batch: self.movement_speed = float(batch.movement_speed)\n if 'origin' in batch: self.origin = vec3(*batch.origin.ravel()) # 3,\n if 'world_up' in batch: self.world_up = vec3(*batch.world_up.ravel()) # 3,\n 
return self\n\n def custom_pose(self, R: torch.Tensor, T: torch.Tensor, K: torch.Tensor):\n # self.K = mat3(*K.mT.ravel())\n self.R = mat3(*R.mT.ravel())\n self.T = vec3(*T.ravel())" }, { "identifier": "cm_cpu_store", "path": "easyvolcap/utils/color_utils.py", "snippet": "def colormap(v: torch.Tensor, cm: str = 'virdis'):\ndef colormap_linear(v: torch.Tensor, cm: NoneType = None):\ndef colormap_dict(v: torch.Tensor, cm: torch.Tensor):\ndef colormap_list(v: torch.Tensor, cm: torch.Tensor):\ndef yuv_to_rgb(x):\ndef rgb_to_yuv(x):\ndef image_derivative(img: torch.Tensor, mode='sobel', normalized=True) -> torch.Tensor:\ndef image_pyramid(input: torch.Tensor, max_level: int = 4) -> List[torch.Tensor]:\ndef variance_of_laplacian(img: torch.Tensor):" }, { "identifier": "depth_curve_fn", "path": "easyvolcap/utils/depth_utils.py", "snippet": "def depth_curve_fn(depth: torch.Tensor, p: float = 0.01, cm: str = 'linear'):\n depth = normalize_depth(depth)\n depth = colormap(depth, cm)\n return depth" }, { "identifier": "load_pts", "path": "easyvolcap/utils/data_utils.py", "snippet": "def load_pts(filename: str):\n from pyntcloud import PyntCloud\n cloud = PyntCloud.from_file(filename)\n verts = cloud.xyz\n if 'red' in cloud.points and 'green' in cloud.points and 'blue' in cloud.points:\n r = np.asarray(cloud.points['red'])\n g = np.asarray(cloud.points['green'])\n b = np.asarray(cloud.points['blue'])\n colors = np.stack([r, g, b], axis=-1) / 255\n elif 'r' in cloud.points and 'g' in cloud.points and 'b' in cloud.points:\n r = np.asarray(cloud.points['r'])\n g = np.asarray(cloud.points['g'])\n b = np.asarray(cloud.points['b'])\n colors = np.stack([r, g, b], axis=-1) / 255\n else:\n colors = None\n\n if 'nx' in cloud.points and 'ny' in cloud.points and 'nz' in cloud.points:\n nx = np.asarray(cloud.points['nx'])\n ny = np.asarray(cloud.points['ny'])\n nz = np.asarray(cloud.points['nz'])\n norms = np.stack([nx, ny, nz], axis=-1)\n else:\n norms = None\n\n if 'alpha' in cloud.points:\n cloud.points['alpha'] = cloud.points['alpha'] / 255\n\n reserved = ['x', 'y', 'z', 'red', 'green', 'blue', 'r', 'g', 'b', 'nx', 'ny', 'nz']\n scalars = dotdict({k: np.asarray(cloud.points[k])[..., None] for k in cloud.points if k not in reserved}) # one extra dimension at the back added\n return verts, colors, norms, scalars" }, { "identifier": "load_mesh", "path": "easyvolcap/utils/data_utils.py", "snippet": "def load_mesh(filename: str, device='cuda', load_uv=False, load_aux=False, backend='pytorch3d'):\n from pytorch3d.io import load_ply, load_obj\n if backend == 'trimesh':\n import trimesh\n mesh: trimesh.Trimesh = trimesh.load(filename)\n return mesh.vertices, mesh.faces\n\n vm, fm = None, None\n if filename.endswith('.npz'):\n mesh = np.load(filename)\n v = torch.from_numpy(mesh['verts'])\n f = torch.from_numpy(mesh['faces'])\n\n if load_uv:\n vm = torch.from_numpy(mesh['uvs'])\n fm = torch.from_numpy(mesh['uvfaces'])\n else:\n if filename.endswith('.ply'):\n v, f = load_ply(filename)\n elif filename.endswith('.obj'):\n v, faces_attr, aux = load_obj(filename)\n f = faces_attr.verts_idx\n\n if load_uv:\n vm = aux.verts_uvs\n fm = faces_attr.textures_idx\n else:\n raise NotImplementedError(f'Unrecognized input format for: {filename}')\n\n v = v.to(device, non_blocking=True).contiguous()\n f = f.to(device, non_blocking=True).contiguous()\n\n if load_uv:\n vm = vm.to(device, non_blocking=True).contiguous()\n fm = fm.to(device, non_blocking=True).contiguous()\n\n if load_uv:\n if load_aux:\n return v, f, vm, fm, aux\n 
else:\n return v, f, vm, fm\n else:\n return v, f" }, { "identifier": "to_cuda", "path": "easyvolcap/utils/data_utils.py", "snippet": "def to_cuda(batch, device=\"cuda\", ignore_list: bool = False) -> torch.Tensor:\n if isinstance(batch, (tuple, list)):\n batch = [to_cuda(b, device, ignore_list) for b in batch]\n elif isinstance(batch, dict):\n batch = dotdict({k: (to_cuda(v, device, ignore_list) if k != \"meta\" else v) for k, v in batch.items()})\n elif isinstance(batch, torch.Tensor):\n batch = batch.to(device, non_blocking=True)\n else: # numpy and others\n batch = torch.as_tensor(batch, device=device)\n return batch" }, { "identifier": "prepare_feedback_transform", "path": "easyvolcap/utils/fcds_utils.py", "snippet": "def prepare_feedback_transform(H: int, W: int, K: torch.Tensor, R: torch.Tensor, T: torch.Tensor,\n n: torch.Tensor,\n f: torch.Tensor,\n xyz: torch.Tensor,\n rgb: torch.Tensor,\n rad: torch.Tensor):\n ixt = get_ndc_perspective_matrix(K, H, W, n[..., 0], f[..., 0]).to(xyz.dtype) # to opengl, remove last dim of n and f\n w2c = affine_padding(torch.cat([R, T], dim=-1)).to(xyz.dtype)\n c2w = affine_inverse(w2c)\n c2w[..., 0] *= 1 # flip x\n c2w[..., 1] *= -1 # flip y\n c2w[..., 2] *= -1 # flip z\n ext = affine_inverse(c2w)\n pix_xyz = torch.cat([xyz, torch.ones_like(xyz[..., :1])], dim=-1) @ ext.mT @ ixt.mT\n pix_rad = abs(H * ixt[..., 1, 1][..., None, None] * rad / pix_xyz[..., -1:]) # z: B, 1 * B, N, world space radius -> ndc radius B, N, 1\n\n # Prepare data to be rendered\n data = torch.cat([pix_xyz, rgb, pix_rad], dim=-1).ravel() # organize the data inside vbo\n return data" }, { "identifier": "get_opencv_camera_params", "path": "easyvolcap/utils/fcds_utils.py", "snippet": "def get_opencv_camera_params(batch: dotdict):\n H = batch.meta.H[0].item() # !: BATCH\n W = batch.meta.W[0].item() # !: BATCH\n K = batch.K\n R = batch.R\n T = batch.T\n C = -batch.R.mT @ batch.T # B, 3, 1\n return H, W, K, R, T, C" }, { "identifier": "typed", "path": "easyvolcap/utils/net_utils.py", "snippet": "def typed(input_to: torch.dtype = torch.float, output_to: torch.dtype = torch.float):\n from easyvolcap.utils.data_utils import to_x\n\n def wrapper(func: Callable):\n def inner(*args, **kwargs):\n args = to_x(args, input_to)\n kwargs = to_x(kwargs, input_to)\n ret = func(*args, **kwargs)\n ret = to_x(ret, output_to)\n return ret\n return inner\n return wrapper" }, { "identifier": "multi_gather", "path": "easyvolcap/utils/net_utils.py", "snippet": "def multi_gather(values: torch.Tensor, indices: torch.Tensor, dim=-2):\n # Gather the value at the -2th dim of values, augment index shape on the back\n # Example: values: B, P, 3, index: B, N, -> B, N, 3\n\n # index will first be augmented to match the values' dimentionality at the back\n # take care of batch dimension of, and acts like a linear indexing in the target dimention\n # we assume that the values's second to last dimension is the dimension to be indexed on\n return values.gather(dim, multi_indexing(indices, values.shape, dim))" }, { "identifier": "create_meshgrid", "path": "easyvolcap/utils/net_utils.py", "snippet": "@torch.jit.script\ndef create_meshgrid(H: int, W: int, device: torch.device = torch.device('cuda'), indexing: str = 'ij', ndc: bool = False,\n correct_pix: bool = True, dtype: torch.dtype = torch.float):\n # kornia has meshgrid, but not the best\n i = torch.arange(H, device=device, dtype=dtype)\n j = torch.arange(W, device=device, dtype=dtype)\n if correct_pix:\n i = i + 0.5\n j = j + 0.5\n if ndc:\n i = i / H * 2 - 1\n j 
= j / W * 2 - 1\n ij = torch.meshgrid(i, j, indexing=indexing) # defaults to ij\n ij = torch.stack(ij, dim=-1) # Ht, Wt, 2\n\n return ij" }, { "identifier": "volume_rendering", "path": "easyvolcap/utils/net_utils.py", "snippet": "def volume_rendering(rgb: torch.Tensor, occ: torch.Tensor, bg_brightness: float = 0.0):\n # NOTE: here occ's last dim is not 1, but n_samples\n # rgb: n_batch, n_rays, n_samples, 3\n # occ: n_batch, n_rays, n_samples, 1\n # bg_image: n_batch, n_rays, 3 or None, if this is given as not None, the last sample on the ray will be replaced by this value (assuming this lies on the background)\n # We need to assume:\n # 1. network will find the True geometry, thus giving the background image its real value\n # 2. background image is rendered in a non-static fasion\n # returns:\n # weights: n_batch, n_rays, n_samples\n # rgb_map: n_batch, n_rays, 3\n # acc_map: n_batch, n_rays, 1\n\n weights = render_weights(occ) # (n_batch, n_rays, n_samples)\n rgb_map, acc_map = render_rgb_acc(weights, rgb)\n rgb_map = rgb_map + (1. - acc_map) * bg_brightness\n\n return weights, rgb_map, acc_map" }, { "identifier": "raw2alpha", "path": "easyvolcap/utils/net_utils.py", "snippet": "def raw2alpha(raws: torch.Tensor, dists=0.005, bias=0.0):\n if isinstance(dists, torch.Tensor):\n if dists.ndim == raws.ndim - 1:\n dists = dists[..., None]\n return 1. - torch.exp(-(raws + bias) * dists)" }, { "identifier": "torch_dtype_to_numpy_dtype", "path": "easyvolcap/utils/net_utils.py", "snippet": "def torch_dtype_to_numpy_dtype(torch_dtype):\n mapping = {\n torch.float32: np.float32,\n torch.float64: np.float64,\n torch.int32: np.int32,\n torch.int64: np.int64,\n torch.int16: np.int16,\n torch.uint8: np.uint8,\n torch.int8: np.int8,\n torch.bool: np.bool_\n }\n return mapping.get(torch_dtype, None)" }, { "identifier": "load_pretrained", "path": "easyvolcap/utils/net_utils.py", "snippet": "def load_pretrained(model_dir: str, resume: bool = True, epoch: int = -1, ext: str = '.npz', remove_if_not_resuming: bool = False, warn_if_not_exist: bool = False):\n if not resume: # remove nothing here\n if remove_if_not_resuming:\n if os.path.isdir(model_dir) and len(os.listdir(model_dir)): # only inform the use if there are files\n # log(red(f\"Removing trained weights: {blue(model_dir)}\"))\n try: run(f'rm -r {model_dir}')\n except: pass\n return None, None\n\n if not os.path.exists(model_dir):\n if warn_if_not_exist:\n log(red(f'Pretrained network: {blue(model_dir)} does not exist'))\n return None, None\n if os.path.isdir(model_dir):\n pts = [\n int(pt.split('.')[0]) for pt in os.listdir(model_dir) if pt != f'latest{ext}' and pt.endswith(ext) and pt.split('.')[0].isnumeric()\n ]\n if len(pts) == 0 and f'latest{ext}' not in os.listdir(model_dir):\n return None, None\n if epoch == -1:\n if f'latest{ext}' in os.listdir(model_dir):\n pt = 'latest'\n else:\n pt = max(pts)\n else:\n pt = epoch\n model_path = join(model_dir, f'{pt}{ext}')\n else:\n model_path = model_dir\n\n if ext == '.pt' or ext == '.pth':\n pretrained = dotdict(torch.load(model_path, 'cpu'))\n else:\n from easyvolcap.utils.data_utils import to_tensor\n pretrained = dotdict(model=to_tensor(dict(**np.load(model_path))), epoch=-1) # the npz files do not contain training parameters\n\n return pretrained, model_path" }, { "identifier": "get_bounds", "path": "easyvolcap/utils/net_utils.py", "snippet": "def get_bounds(xyz, padding=0.05): # 5mm padding? 
really?\n # xyz: n_batch, n_points, 3\n\n min_xyz = torch.min(xyz, dim=1)[0] # torch min with dim is ...\n max_xyz = torch.max(xyz, dim=1)[0]\n min_xyz -= padding\n max_xyz += padding\n bounds = torch.stack([min_xyz, max_xyz], dim=1)\n return bounds\n diagonal = bounds[..., 1:] - bounds[..., :1] # n_batch, 1, 3\n bounds[..., 1:] = bounds[..., :1] + torch.ceil(diagonal / voxel_size) * voxel_size # n_batch, 1, 3\n return bounds" }, { "identifier": "CHECK_CUDART_ERROR", "path": "easyvolcap/utils/net_utils.py", "snippet": "def CHECK_CUDART_ERROR(args):\n from cuda import cudart\n\n if isinstance(args, tuple):\n assert len(args) >= 1\n err = args[0]\n if len(args) == 1:\n ret = None\n elif len(args) == 2:\n ret = args[1]\n else:\n ret = args[1:]\n else:\n err = args\n ret = None\n\n assert isinstance(err, cudart.cudaError_t), type(err)\n if err != cudart.cudaError_t.cudaSuccess:\n raise RuntimeError(FORMAT_CUDART_ERROR(err))\n\n return ret" }, { "identifier": "FORMAT_CUDART_ERROR", "path": "easyvolcap/utils/net_utils.py", "snippet": "def FORMAT_CUDART_ERROR(err):\n from cuda import cudart\n return (\n f\"{cudart.cudaGetErrorName(err)[1].decode('utf-8')}({int(err)}): \"\n f\"{cudart.cudaGetErrorString(err)[1].decode('utf-8')}\"\n )" } ]
from typing import TYPE_CHECKING
from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager  # must be imported before OpenGL.GL
from torch import nn
from enum import Enum, auto
from os.path import join, dirname
from typing import Dict, Union, List
from glm import vec2, vec3, vec4, mat3, mat4, mat4x3, mat2x3  # This is actually highly optimized
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.viewer_utils import Camera
from easyvolcap.utils.color_utils import cm_cpu_store
from easyvolcap.utils.depth_utils import depth_curve_fn
from easyvolcap.utils.data_utils import load_pts, load_mesh, to_cuda
from easyvolcap.utils.fcds_utils import prepare_feedback_transform, get_opencv_camera_params
from easyvolcap.utils.net_utils import typed, multi_gather, create_meshgrid, volume_rendering, raw2alpha, torch_dtype_to_numpy_dtype, load_pretrained, get_bounds
from easyvolcap.utils.net_utils import CHECK_CUDART_ERROR, FORMAT_CUDART_ERROR
from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager
from OpenGL.GL import shaders
from pytorch3d.structures import Pointclouds, Meshes
from pytorch3d.structures import Pointclouds, Meshes
from cuda import cudart
from cuda import cudart
from cuda import cudart
from easyvolcap.engine.registry import call_from_cfg
from easyvolcap.utils.gaussian_utils import GaussianModel
from cuda import cudart
from cuda import cudart
from cuda import cudart
from cuda import cudart
from cuda import cudart
from cuda import cudart
import os
import glm
import torch
import ctypes
import numpy as np
import sys
import OpenGL.GL as gl
12,206
if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return color_buffer, depth_upper, depth_lower, depth_attach, fbo def hareward_peeling_framebuffer(H: int, W: int): # Prepare for write frame buffers index_buffer = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, storereference in fb # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter). gl.glBindTexture(gl.GL_TEXTURE_2D, index_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32I, W, H, 0, gl.GL_RED_INTEGER, gl.GL_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return index_buffer, depth_lower, depth_attach, fbo class Gaussian(Mesh): def __init__(self, filename: str = 'assets/meshes/zju3dv.npz', gaussian_cfg: dotdict = dotdict(), quad_cfg: dotdict = dotdict(), render_depth: bool = False, # show depth or show color dpt_cm: str = 'linear', H: int = 1024, W: int = 1024, **kwargs, ): # Import Gaussian Model # Housekeeping super().__init__(**kwargs) self.name = split(filename)[-1] # Init Gaussian related models, for now only the first gaussian model is supported if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'): # Load from GaussianTSampler pretrained, _ = load_pretrained(filename) # loaded model and updated path (maybe) pretrained = pretrained.model state_dict = dotdict() for k, v in pretrained.items(): if k.startswith('sampler.pcds.0'): state_dict[k.replace('sampler.pcds.0.', '')] = v # Load the parameters into the gaussian model self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, 
gaussian_cfg) # init empty gaussian model self.gaussian_model.load_state_dict(state_dict) # load the first gaussian model self.gaussian_model.cuda() # move the parameters to GPU elif filename.endswith('.ply'): # Load raw GaussianModel pass else: raise NotImplementedError # Init rendering quad self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W) # Other configurations self.render_depth = render_depth self.dpt_cm = dpt_cm # Disabling initialization def load_from_file(self, *args, **kwargs): pass def load_from_data(self, *args, **kwargs): pass def compile_shaders(self): pass def update_gl_buffers(self): pass def resize_textures(self, H: int, W: int): self.quad.resize_textures(H, W) # The actual rendering function @torch.no_grad() def render(self, camera: Camera): # Perform actual gaussian rendering
from __future__ import annotations if TYPE_CHECKING: # fmt: off # Environment variable messaging # Need to export EGL_DEVICE_ID before trying to import egl # And we need to consider the case when we're performing distributed training # from easyvolcap.engine import cfg, args # FIXME: GLOBAL IMPORTS if 'easyvolcap.engine' in sys.modules and (sys.modules['easyvolcap.engine'].args.type != 'gui' or sys.modules['easyvolcap.engine'].cfg.viewer_cfg.type == 'UnitySocketViewer'): # FIXME: GLOBAL VARIABLES try: except Exception as e: log(yellow(f'Could not import EGL related modules. {type(e).__name__}: {e}')) os.environ['PYOPENGL_PLATFORM'] = '' try: except Exception as e: print(f'WARNING: OpenGL shaders import error encountered, please install the latest PyOpenGL from github using:') print(f'pip install git+https://github.com/mcfletch/pyopengl') raise e # fmt: on def linearize_depth(d, n: float, f: float): # 0-1 -> -1,1 # ndc -> view return (2.0 * n * f) / (f + n - (d * 2 - 1) * (f - n)) def common_opengl_options(): # Use program point size gl.glEnable(gl.GL_PROGRAM_POINT_SIZE) # Performs face culling gl.glEnable(gl.GL_CULL_FACE) gl.glCullFace(gl.GL_BACK) # Performs alpha trans testing gl.glEnable(gl.GL_ALPHA_TEST) # Performs z-buffer testing gl.glEnable(gl.GL_DEPTH_TEST) # gl.glDepthMask(gl.GL_TRUE) gl.glDepthFunc(gl.GL_LEQUAL) # gl.glDepthRange(-1.0, 1.0) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) # Enable some masking tests gl.glEnable(gl.GL_SCISSOR_TEST) # Enable this to correctly render points # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310 gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory. # # The second argument specifies that our pixels will be in bytes. 
# gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1) def load_shader_source(file: str = 'splat.frag'): # Ideally we can just specify the shader name instead of an variable if not exists(file): file = f'{dirname(__file__)}/shaders/{file}' if not exists(file): file = file.replace('shaders/', '') if not exists(file): raise RuntimeError(f'Shader file: {file} does not exist') with open(file, 'r') as f: return f.read() def use_gl_program(program: Union[shaders.ShaderProgram, dict]): if isinstance(program, dict): # Recompile the program if the user supplied sources program = dotdict(program) program = shaders.compileProgram( shaders.compileShader(program.VERT_SHADER_SRC, gl.GL_VERTEX_SHADER), shaders.compileShader(program.FRAG_SHADER_SRC, gl.GL_FRAGMENT_SHADER) ) return gl.glUseProgram(program) class Mesh: class RenderType(Enum): POINTS = 1 LINES = 2 TRIS = 3 QUADS = 4 # TODO: Support quad loading STRIPS = 5 # Helper class to render a mesh on opengl # This implementation should only be used for debug visualization # Since no differentiable mechanism will be added # We recommend using nvdiffrast and pytorch3d's point renderer directly if you will to optimize these structures directly def __init__(self, verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]), # need to call update after update faces: torch.Tensor = torch.tensor([[0, 1, 2]]), # need to call update after update colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict(), render_type: RenderType = RenderType.TRIS, # Misc info name: str = 'mesh', filename: str = '', visible: bool = True, # Render options shade_flat: bool = False, # smooth shading point_radius: float = 0.015, render_normal: bool = False, # Storage options store_device: str = 'cpu', compute_device: str = 'cuda', vert_sizes=[3, 3, 3], # pos + color + norm # Init options est_normal_thresh: int = 100000, # Ignore unused input **kwargs, ) -> None: super().__init__() self.name = name self.visible = visible self.render_type = render_type self.shade_flat = shade_flat self.point_radius = point_radius self.render_normal = render_normal self.store_device = store_device self.compute_device = compute_device self.vert_sizes = vert_sizes self.est_normal_thresh = est_normal_thresh # Uniform and program self.compile_shaders() self.uniforms = dotdict() # uniform values # Before initialization self.max_verts = 0 self.max_faces = 0 # OpenGL data if filename: self.load_from_file(filename) else: self.load_from_data(verts, faces, colors, normals, scalars) def compile_shaders(self): try: self.mesh_program = shaders.compileProgram( shaders.compileShader(load_shader_source('mesh.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('mesh.frag'), gl.GL_FRAGMENT_SHADER) ) self.point_program = shaders.compileProgram( shaders.compileShader(load_shader_source('point.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('point.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e @property def n_verts_bytes(self): return len(self.verts) * self.vert_size * self.verts.element_size() @property def n_faces_bytes(self): return len(self.faces) * self.face_size * self.faces.element_size() @property def verts_data(self): # a heavy copy operation verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts @property def faces_data(self): # a heavy copy 
operation faces = self.faces.ravel().numpy() # N, 3 faces = np.asarray(faces, dtype=np.uint32, order='C') return faces @property def face_size(self): return self.render_type.value @property def vert_size(self): return sum(self.vert_sizes) def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'): verts, faces, colors, normals, scalars = self.load_data_from_file(filename) self.load_from_data(verts, faces, colors, normals, scalars) def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'): self.name = os.path.split(filename)[-1] verts, faces, colors, normals, scalars = None, None, None, None, None verts, faces = load_mesh(filename, device=self.store_device) if not len(faces): verts, colors, normals, scalars = load_pts(filename) self.render_type = Mesh.RenderType.POINTS else: self.render_type = Mesh.RenderType(faces.shape[-1]) # use value return verts, faces, colors, normals, scalars def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict()): # Data type conversion verts = torch.as_tensor(verts) # convert to tensor if input is of other types if verts.dtype == torch.float32: pass # supports this for now elif verts.dtype == torch.float16: pass # supports this for now else: verts = verts.type(torch.float) # convert to float32 if input is of higher precision gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT self.vert_gl_types = [gl_dtype] * len(self.vert_sizes) # Prepare main mesh data: vertices and faces self.verts = torch.as_tensor(verts, device=self.store_device) self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32) # NOTE: No uint32 support # Prepare colors and normals if colors is not None: self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype) else: bounds = get_bounds(self.verts[None])[0] self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0]) if normals is not None: self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype) else: self.estimate_vertex_normals() # Prepare other scalars if scalars is not None: for k, v in scalars.items(): setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype)) # is this ok? 
# Prepare OpenGL related buffer self.update_gl_buffers() def estimate_vertex_normals(self): def est_pcd_norms(): if self.verts.dtype == torch.half: self.normals = self.verts else: pcd = Pointclouds([self.verts]).to(self.compute_device) self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype) # no batch dim def est_tri_norms(): if self.verts.dtype == torch.half: self.normals = self.verts else: mesh = Meshes([self.verts], [self.faces]).to(self.compute_device) self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype) # no batch dim if not len(self.verts) > self.est_normal_thresh: if self.render_type == Mesh.RenderType.TRIS: est_tri_norms() elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms() else: # log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping')) self.normals = self.verts else: # log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation')) self.normals = self.verts def offscreen_render(self, eglctx: "eglContextManager", camera: Camera): eglctx.resize(camera.W, camera.H) self.render(camera) def render(self, camera: Camera): if not self.visible: return # For point rendering if self.render_type == Mesh.RenderType.POINTS: gl.glUseProgram(self.point_program) self.use_gl_program(self.point_program) else: gl.glUseProgram(self.mesh_program) self.use_gl_program(self.mesh_program) self.upload_gl_uniforms(camera) gl.glBindVertexArray(self.vao) if self.render_type == Mesh.RenderType.POINTS: gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts)) # number of vertices elif self.render_type == Mesh.RenderType.LINES: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.TRIS: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.QUADS: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.STRIPS: gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) else: raise NotImplementedError gl.glBindVertexArray(0) def use_gl_program(self, program: shaders.ShaderProgram): use_gl_program(program) self.uniforms.shade_flat = gl.glGetUniformLocation(program, "shade_flat") self.uniforms.point_radius = gl.glGetUniformLocation(program, "point_radius") self.uniforms.render_normal = gl.glGetUniformLocation(program, "render_normal") self.uniforms.H = gl.glGetUniformLocation(program, "H") self.uniforms.W = gl.glGetUniformLocation(program, "W") self.uniforms.n = gl.glGetUniformLocation(program, "n") self.uniforms.f = gl.glGetUniformLocation(program, "f") self.uniforms.P = gl.glGetUniformLocation(program, "P") self.uniforms.K = gl.glGetUniformLocation(program, "K") self.uniforms.V = gl.glGetUniformLocation(program, "V") self.uniforms.M = gl.glGetUniformLocation(program, "M") def upload_gl_uniforms(self, camera: Camera): K = camera.gl_ixt # hold the reference V = camera.gl_ext # hold the reference M = glm.identity(mat4) P = K * V * M gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat) gl.glUniform1f(self.uniforms.point_radius, self.point_radius) gl.glUniform1i(self.uniforms.render_normal, 
self.render_normal) gl.glUniform1i(self.uniforms.H, camera.H) # o2w gl.glUniform1i(self.uniforms.W, camera.W) # o2w gl.glUniform1f(self.uniforms.n, camera.n) # o2w gl.glUniform1f(self.uniforms.f, camera.f) # o2w gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P)) # o2clip gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K)) # c2clip gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, glm.value_ptr(V)) # w2c gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M)) # o2w def update_gl_buffers(self): # Might be overwritten self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0, len(self.faces) if hasattr(self, 'faces') else 0) # maybe repeated if hasattr(self, 'verts'): gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo) gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data) # hold the reference if hasattr(self, 'faces'): gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data) def resize_buffers(self, v: int = 0, f: int = 0): if v > self.max_verts or f > self.max_faces: if v > self.max_verts: self.max_verts = v if f > self.max_faces: self.max_faces = f self.init_gl_buffers(v, f) def init_gl_buffers(self, v: int = 0, f: int = 0): # This will only init the corresponding buffer object n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes # Housekeeping if hasattr(self, 'vao'): gl.glDeleteVertexArrays(1, [self.vao]) gl.glDeleteBuffers(2, [self.vbo, self.ebo]) self.vao = gl.glGenVertexArrays(1) self.vbo = gl.glGenBuffers(1) self.ebo = gl.glGenBuffers(1) gl.glBindVertexArray(self.vao) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo) gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) # NOTE: Using pointers here won't work # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao cumsum = 0 for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)): gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size())) # we use 32 bit float gl.glEnableVertexAttribArray(i) cumsum += s if n_faces_bytes > 0: # Some implementation has no faces, we dangerously ignore ebo here, assuming they will never be used gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) gl.glBindVertexArray(0) def render_imgui(self): pass class Quad(Mesh): # A shared texture for CUDA (pytorch) and OpenGL # Could be rendererd to screen using blitting or just drawing a quad def __init__(self, H: int = 256, W: int = 256, use_cudagl: bool = True, compose: bool = False, compose_power: float = 1.0): # the texture to blip self.use_cudagl = use_cudagl self.vert_sizes = [3] # only position self.vert_gl_types = [gl.GL_FLOAT] # only position self.render_type = Mesh.RenderType.STRIPS # remove side effects of settings _type self.max_verts, self.max_faces = 0, 0 self.verts = torch.as_tensor([[-1., -1., 0.5], [1., -1., 0.5], [-1., 1., 0.5], [1., 1., 0.5],]) self.update_gl_buffers() self.compile_shaders() self.max_H, self.max_W = H, W self.H, self.W = H, W self.compose = compose self.compose_power = compose_power self.init_texture() @property def n_faces_bytes(self): return 0 def use_gl_program(self, 
program: shaders.ShaderProgram): super().use_gl_program(program) self.uniforms.tex = gl.glGetUniformLocation(program, 'tex') gl.glUseProgram(self.quad_program) # use a different program gl.glUniform1i(self.uniforms.tex, 0) def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def resize_textures(self, H: int, W: int): # analogy to update_gl_buffers self.H, self.W = H, W if self.H > self.max_H or self.W > self.max_W: # max got updated self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W) self.init_texture() def init_texture(self): if hasattr(self, 'cu_tex'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex)) if hasattr(self, 'fbo'): gl.glDeleteFramebuffers(1, [self.fbo]) gl.glDeleteTextures(1, [self.tex]) # Init the texture to be blit onto the screen self.tex = gl.glGenTextures(1) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0)) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Init the framebuffer object if explicit blitting is used (slower than drawing quad) self.fbo = gl.glGenFramebuffers(1) old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo) if self.use_cudagl: if self.compose: # Both reading and writing of this resource is required flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone else: flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags)) def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0): assert self.use_cudagl, "Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad" w = w or self.W h = h or self.H if image.shape[-1] == 3: image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1) # add alpha channel kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream)) cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0)) if self.compose: """ Blit current framebuffer to this texture (self.tex) Read content of this texture into a cuda buffer Perform alpha blending based on the frame's alpha channel Copy the blended image back into the texture (self.tex) """ old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo) # read buffer defaults to 0 gl.glBlitFramebuffer(x, y, w, h, x, y, w, h, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) # now self.tex contains the content of the already rendered frame gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old) buffer = torch.empty_like(image) CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(), # dst w * 4 * buffer.element_size(), # dpitch cu_tex_arr, # 
src x * 4 * image.element_size(), # wOffset y, # hOffset w * 4 * buffer.element_size(), # width Width of matrix transfer (columns in bytes) h, # height kind, # kind torch.cuda.current_stream().cuda_stream)) # stream # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]]) alpha = image[..., -1:] / 255 image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha # storing float into int image[..., -1:] = buffer[..., -1:] + image[..., -1:] image = image.clip(0, 255) CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr, x * 4 * image.element_size(), y, image.data_ptr(), w * 4 * image.element_size(), # differently sized w * 4 * image.element_size(), # rgba, should do a composition first h, kind, torch.cuda.current_stream().cuda_stream)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream)) def upload_to_texture(self, ptr: np.ndarray): H, W = ptr.shape[:2] H, W = min(self.H, H), min(self.W, W) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, W, H, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[:H, :W]) # to gpu, might slow down? @property def verts_data(self): # a heavy copy operation verts = self.verts.ravel().detach().cpu().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts def render(self, camera: Camera = None): self.draw() # no uploading needed def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0): """ Upload the texture instead of the camera This respects the OpenGL convension of lower left corners """ w = w or self.W h = h or self.H _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT) gl.glViewport(x, y, w, h) gl.glScissor(x, y, w, h) # only render in this small region of the viewport gl.glUseProgram(self.quad_program) # use a different program gl.glActiveTexture(gl.GL_TEXTURE0) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glBindVertexArray(self.vao) gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) gl.glBindVertexArray(0) # Some house keepings gl.glViewport(0, 0, W, H) gl.glScissor(0, 0, W, H) def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0): """ This respects the OpenGL convension of lower left corners """ w = w or self.W h = h or self.H old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo) # write buffer defaults to 0 gl.glBlitFramebuffer(x, y, x + w, y + h, # the height is flipped x, y, x + w, y + h, # the height is flipped gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old) class UQuad(Mesh): """ Responsible for initializing textures with a single value or blitting a texture to a framebuffer (possibly better done with blit instead of quad drawing) Effectively clearing the texture for real, see: https://stackoverflow.com/questions/37335281/is-glcleargl-color-buffer-bit-preferred-before-a-whole-frame-buffer-overwritte """ def __init__(self): self.n_blit_values = 3 self.vert_sizes = [3] # only position self.vert_gl_types = [gl.GL_FLOAT] # only position self.max_verts, self.max_faces = 0, 0 self.verts = torch.as_tensor([[-1., -1., 0.5], [1., -1., 0.5], [-1., 1., 0.5], [1., 1., 0.5],]) self.compile_shaders() self.uniforms = dotdict() # uniform values self.use_gl_programs(self.quad_program) self.update_gl_buffers() @property def n_faces_bytes(self): return 0 @property def verts_data(self): # a heavy copy operation verts = self.verts.ravel().detach().cpu().numpy() # MARK: 
Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts def use_gl_programs(self, program: shaders.ShaderProgram): for i in range(self.n_blit_values): self.uniforms[f'value{i}'] = gl.glGetUniformLocation(program, f'value{i}') for i in range(self.n_blit_values): self.uniforms[f'use_tex{i}'] = gl.glGetUniformLocation(program, f'use_tex{i}') gl.glUseProgram(self.program) # use a different program for i in range(self.n_blit_values): self.uniforms[f'tex{i}'] = gl.glGetUniformLocation(program, f'tex{i}') gl.glUniform1i(self.uniforms[f'tex{i}'], i) def upload_gl_uniforms(self, values: List[List[float]], use_texs: List[bool]): for i, v in enumerate(values): v = vec4(v) # HACK: Hold the reference for this upload gl.glUniform4fv(self.uniforms[f'value{i}'], 1, glm.value_ptr(v)) # as float array for i, v in enumerate(use_texs): gl.glUniform1i(self.uniforms[f'use_tex{i}'], v) def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('uquad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('uquad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def draw(self, values: List[List[float]] = [], use_texs=[]): """ This function will render 'value' to the currently bound framebuffer, up to six outputs """ old_prog = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM) old_vao = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING) gl.glUseProgram(self.quad_program) self.upload_gl_uniforms(values, use_texs) # should be a noop # Prepare to render to textures gl.glBindVertexArray(self.vao) gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) # number of vertices gl.glBindVertexArray(old_vao) gl.glUseProgram(old_prog) class DQuad(UQuad): def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('dquad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('dquad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def draw(self, values: List[List[float]] = [], use_texs=[]): old_function = gl.glGetIntegerv(gl.GL_DEPTH_FUNC) gl.glDepthFunc(gl.GL_ALWAYS) super().draw(values, use_texs) gl.glDepthFunc(old_function) def hardware_rendering_framebuffer(H: int, W: int, gl_tex_dtype=gl.GL_RGBA16F): # Prepare for write frame buffers color_buffer = gl.glGenTextures(1) depth_upper = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, storereference in fb # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter). 
gl.glBindTexture(gl.GL_TEXTURE_2D, color_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl_tex_dtype, W, H, 0, gl.GL_RGBA, gl.GL_FLOAT, ctypes.c_void_p(0)) # 16 * 4 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_upper) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, color_buffer, 0) # location 0 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_upper, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT2, gl.GL_TEXTURE_2D, depth_lower, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return color_buffer, depth_upper, depth_lower, depth_attach, fbo def hareward_peeling_framebuffer(H: int, W: int): # Prepare for write frame buffers index_buffer = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, storereference in fb # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter). 
gl.glBindTexture(gl.GL_TEXTURE_2D, index_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32I, W, H, 0, gl.GL_RED_INTEGER, gl.GL_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return index_buffer, depth_lower, depth_attach, fbo class Gaussian(Mesh): def __init__(self, filename: str = 'assets/meshes/zju3dv.npz', gaussian_cfg: dotdict = dotdict(), quad_cfg: dotdict = dotdict(), render_depth: bool = False, # show depth or show color dpt_cm: str = 'linear', H: int = 1024, W: int = 1024, **kwargs, ): # Import Gaussian Model # Housekeeping super().__init__(**kwargs) self.name = split(filename)[-1] # Init Gaussian related models, for now only the first gaussian model is supported if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'): # Load from GaussianTSampler pretrained, _ = load_pretrained(filename) # loaded model and updated path (maybe) pretrained = pretrained.model state_dict = dotdict() for k, v in pretrained.items(): if k.startswith('sampler.pcds.0'): state_dict[k.replace('sampler.pcds.0.', '')] = v # Load the parameters into the gaussian model self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg) # init empty gaussian model self.gaussian_model.load_state_dict(state_dict) # load the first gaussian model self.gaussian_model.cuda() # move the parameters to GPU elif filename.endswith('.ply'): # Load raw GaussianModel pass else: raise NotImplementedError # Init rendering quad self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W) # Other configurations self.render_depth = render_depth self.dpt_cm = dpt_cm # Disabling initialization def load_from_file(self, *args, **kwargs): pass def load_from_data(self, *args, **kwargs): pass def compile_shaders(self): pass def update_gl_buffers(self): pass def resize_textures(self, H: int, W: int): self.quad.resize_textures(H, W) # The actual rendering function @torch.no_grad() def render(self, camera: Camera): # Perform actual gaussian rendering
batch = to_cuda(camera.to_batch())
6
2023-12-07 08:53:42+00:00
16k
alibaba/animate-anything
utils/lora_handler.py
[ { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition_mask.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, it will skip the normalization and activation layers in post-processing\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n up_block_types: Tuple[str] = (\"UpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\"),\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1024,\n attention_head_dim: Union[int, Tuple[int]] = 64,\n motion_mask = False,\n motion_strength = False,\n ):\n super().__init__()\n self.motion_mask = motion_mask\n self.motion_strength = motion_strength\n print(f\"motion mask {self.motion_mask}, motion_strength {self.motion_strength}\")\n self.sample_size = sample_size\n self.gradient_checkpointing = False\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. 
`up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_out_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n self.conv_in2 = nn.Conv2d(\n 5, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n self.time_proj = Timesteps(block_out_channels[0], True, 0)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n cond_proj_dim=block_out_channels[0],\n )\n\n self.motion_proj = Timesteps(block_out_channels[0], True, 0)\n self.motion_embedding = nn.Sequential(\n nn.Linear(timestep_input_dim, time_embed_dim), nn.SiLU(),\n nn.Linear(time_embed_dim, time_embed_dim))\n nn.init.zeros_(self.motion_embedding[-1].weight)\n nn.init.zeros_(self.motion_embedding[-1].bias)\n\n self.transformer_in = TransformerTemporalModel(\n num_attention_heads=8,\n attention_head_dim=attention_head_dim,\n in_channels=block_out_channels[0],\n num_layers=1,\n )\n\n # class embedding\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=False,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=False,\n )\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, 
len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=False,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, value=False):\n self.gradient_checkpointing = value\n self.mid_block.gradient_checkpointing = value\n for module in self.down_blocks + self.up_blocks:\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value \n \n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n condition_latent: torch.Tensor,\n mask: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n motion = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Returns:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n sample = torch.cat([condition_latent, sample], dim=2)\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n num_frames = sample.shape[2]\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n if self.motion_strength and motion is not None:\n timestep_cond = self.motion_proj(motion).to(dtype=self.dtype)\n emb = self.time_embedding(t_emb, timestep_cond)\n #emb += self.motion_embedding(m_emb)\n else:\n emb = self.time_embedding(t_emb, timestep_cond)\n emb = emb.repeat_interleave(repeats=num_frames, dim=0)\n encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0)\n\n # 2. pre-process\n if self.motion_mask and mask is not None:\n mask = repeat(mask , 'b 1 1 h w -> (t b) 1 f h w', t=sample.shape[0]//mask.shape[0], f=sample.shape[2])\n sample = torch.cat([mask, sample], dim=1)\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in2(sample)\n else:\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in(sample)\n\n if num_frames > 1:\n if self.gradient_checkpointing:\n sample = transformer_g_c(self.transformer_in, sample, num_frames)\n else:\n sample = self.transformer_in(sample, num_frames=num_frames).sample\n\n # 3. 
down\n down_block_res_samples = (sample,)\n for i, downsample_block in enumerate(self.down_blocks):\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)\n \n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n num_frames=num_frames,\n )\n\n # 6. 
post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n\n sample = self.conv_out(sample)\n\n # reshape to (batch, channel, framerate, width, height)\n sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)\n sample = sample[:,:,1:]\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)" }, { "identifier": "convert_unet_state_dict", "path": "utils/convert_diffusers_to_original_ms_text_to_video.py", "snippet": "def convert_unet_state_dict(unet_state_dict, strict_mapping=False):\n print ('Converting the UNET')\n # buyer beware: this is a *brittle* function,\n # and correct output requires that all of these pieces interact in\n # the exact order in which I have arranged them.\n mapping = {k: k for k in unet_state_dict.keys()}\n\n for sd_name, hf_name in unet_conversion_map:\n if strict_mapping:\n if hf_name in mapping:\n mapping[hf_name] = sd_name\n else:\n mapping[hf_name] = sd_name\n for k, v in mapping.items():\n if \"resnets\" in k:\n for sd_part, hf_part in unet_conversion_map_resnet:\n v = v.replace(hf_part, sd_part)\n mapping[k] = v\n # elif \"temp_convs\" in k:\n # for sd_part, hf_part in unet_conversion_map_resnet:\n # v = v.replace(hf_part, sd_part)\n # mapping[k] = v\n for k, v in mapping.items():\n for sd_part, hf_part in unet_conversion_map_layer:\n v = v.replace(hf_part, sd_part)\n mapping[k] = v\n \n\n # there must be a pattern, but I don't want to bother atm\n do_not_unsqueeze = [f'output_blocks.{i}.1.proj_out.weight' for i in range(3, 12)] + [f'output_blocks.{i}.1.proj_in.weight' for i in range(3, 12)] + ['middle_block.1.proj_in.weight', 'middle_block.1.proj_out.weight'] + [f'input_blocks.{i}.1.proj_out.weight' for i in [1, 2, 4, 5, 7, 8]] + [f'input_blocks.{i}.1.proj_in.weight' for i in [1, 2, 4, 5, 7, 8]]\n print (do_not_unsqueeze)\n\n new_state_dict = {v: (unet_state_dict[k].unsqueeze(-1) if ('proj_' in k and ('bias' not in k) and (k not in do_not_unsqueeze)) else unet_state_dict[k]) for k, v in mapping.items()}\n # HACK: idk why the hell it does not work with list comprehension\n for k, v in new_state_dict.items():\n has_k = False\n for n in do_not_unsqueeze:\n if k == n:\n has_k = True\n\n if has_k:\n v = v.squeeze(-1)\n new_state_dict[k] = v\n\n return new_state_dict" }, { "identifier": "convert_text_enc_state_dict_v20", "path": "utils/convert_diffusers_to_original_ms_text_to_video.py", "snippet": "def convert_text_enc_state_dict_v20(text_enc_dict):\n #print ('Converting the text encoder')\n new_state_dict = {}\n capture_qkv_weight = {}\n capture_qkv_bias = {}\n for k, v in text_enc_dict.items():\n if (\n k.endswith(\".self_attn.q_proj.weight\")\n or k.endswith(\".self_attn.k_proj.weight\")\n or k.endswith(\".self_attn.v_proj.weight\")\n ):\n k_pre = k[: -len(\".q_proj.weight\")]\n k_code = k[-len(\"q_proj.weight\")]\n if k_pre not in capture_qkv_weight:\n capture_qkv_weight[k_pre] = [None, None, None]\n capture_qkv_weight[k_pre][code2idx[k_code]] = v\n continue\n\n if (\n k.endswith(\".self_attn.q_proj.bias\")\n or k.endswith(\".self_attn.k_proj.bias\")\n or k.endswith(\".self_attn.v_proj.bias\")\n ):\n k_pre = k[: -len(\".q_proj.bias\")]\n k_code = k[-len(\"q_proj.bias\")]\n if k_pre not in capture_qkv_bias:\n capture_qkv_bias[k_pre] = [None, None, None]\n capture_qkv_bias[k_pre][code2idx[k_code]] = v\n continue\n\n relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)\n new_state_dict[relabelled_key] = v\n\n for 
k_pre, tensors in capture_qkv_weight.items():\n if None in tensors:\n raise Exception(\"CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing\")\n relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)\n new_state_dict[relabelled_key + \".in_proj_weight\"] = torch.cat(tensors)\n\n for k_pre, tensors in capture_qkv_bias.items():\n if None in tensors:\n raise Exception(\"CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing\")\n relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)\n new_state_dict[relabelled_key + \".in_proj_bias\"] = torch.cat(tensors)\n\n return new_state_dict" }, { "identifier": "extract_lora_ups_down", "path": "utils/lora.py", "snippet": "def extract_lora_ups_down(model, target_replace_module=DEFAULT_TARGET_REPLACE):\n\n loras = []\n\n for _m, _n, _child_module in _find_modules(\n model,\n target_replace_module,\n search_class=[LoraInjectedLinear, LoraInjectedConv2d, LoraInjectedConv3d],\n ):\n loras.append((_child_module.lora_up, _child_module.lora_down))\n\n if len(loras) == 0:\n raise ValueError(\"No lora injected.\")\n\n return loras" }, { "identifier": "inject_trainable_lora_extended", "path": "utils/lora.py", "snippet": "def inject_trainable_lora_extended(\n model: nn.Module,\n target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,\n r: int = 4,\n loras=None, # path to lora .pt\n):\n \"\"\"\n inject lora into model, and returns lora parameter groups.\n \"\"\"\n\n require_grad_params = []\n names = []\n\n if loras != None:\n loras = torch.load(loras)\n\n for _module, name, _child_module in _find_modules(\n model, target_replace_module, search_class=[nn.Linear, nn.Conv2d, nn.Conv3d]\n ):\n if _child_module.__class__ == nn.Linear:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedLinear(\n _child_module.in_features,\n _child_module.out_features,\n _child_module.bias is not None,\n r=r,\n )\n _tmp.linear.weight = weight\n if bias is not None:\n _tmp.linear.bias = bias\n elif _child_module.__class__ == nn.Conv2d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv2d(\n _child_module.in_channels,\n _child_module.out_channels,\n _child_module.kernel_size,\n _child_module.stride,\n _child_module.padding,\n _child_module.dilation,\n _child_module.groups,\n _child_module.bias is not None,\n r=r,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias\n\n elif _child_module.__class__ == nn.Conv3d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv3d(\n _child_module.in_channels,\n _child_module.out_channels,\n bias=_child_module.bias is not None,\n kernel_size=_child_module.kernel_size,\n padding=_child_module.padding,\n r=r,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias \n # switch the module\n _tmp.to(_child_module.weight.device).to(_child_module.weight.dtype)\n if bias is not None:\n _tmp.to(_child_module.bias.device).to(_child_module.bias.dtype)\n \n _module._modules[name] = _tmp\n require_grad_params.append(_module._modules[name].lora_up.parameters())\n require_grad_params.append(_module._modules[name].lora_down.parameters())\n\n if loras != None:\n _module._modules[name].lora_up.weight = loras.pop(0)\n _module._modules[name].lora_down.weight = loras.pop(0)\n\n _module._modules[name].lora_up.weight.requires_grad = True\n _module._modules[name].lora_down.weight.requires_grad = True\n 
names.append(name)\n\n return require_grad_params, names" }, { "identifier": "save_lora_weight", "path": "utils/lora.py", "snippet": "def save_lora_weight(\n model,\n path=\"./lora.pt\",\n target_replace_module=DEFAULT_TARGET_REPLACE,\n): \n weights = []\n for _up, _down in extract_lora_ups_down(\n model, target_replace_module=target_replace_module\n ):\n weights.append(_up.weight.to(\"cpu\").to(torch.float32))\n weights.append(_down.weight.to(\"cpu\").to(torch.float32))\n\n torch.save(weights, path)" }, { "identifier": "train_patch_pipe", "path": "utils/lora.py", "snippet": "def train_patch_pipe(pipe, patch_unet, patch_text):\n if patch_unet:\n print(\"LoRA : Patching Unet\")\n collapse_lora(pipe.unet)\n monkeypatch_remove_lora(pipe.unet)\n\n if patch_text:\n print(\"LoRA : Patching text encoder\")\n\n collapse_lora(pipe.text_encoder)\n monkeypatch_remove_lora(pipe.text_encoder)" }, { "identifier": "monkeypatch_or_replace_lora", "path": "utils/lora.py", "snippet": "def monkeypatch_or_replace_lora(\n model,\n loras,\n target_replace_module=DEFAULT_TARGET_REPLACE,\n r: Union[int, List[int]] = 4,\n):\n for _module, name, _child_module in _find_modules(\n model, target_replace_module, search_class=[nn.Linear, LoraInjectedLinear]\n ):\n _source = (\n _child_module.linear\n if isinstance(_child_module, LoraInjectedLinear)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedLinear(\n _source.in_features,\n _source.out_features,\n _source.bias is not None,\n r=r.pop(0) if isinstance(r, list) else r,\n )\n _tmp.linear.weight = weight\n\n if bias is not None:\n _tmp.linear.bias = bias\n\n # switch the module\n _module._modules[name] = _tmp\n\n up_weight = loras.pop(0)\n down_weight = loras.pop(0)\n\n _module._modules[name].lora_up.weight = nn.Parameter(\n up_weight.type(weight.dtype)\n )\n _module._modules[name].lora_down.weight = nn.Parameter(\n down_weight.type(weight.dtype)\n )\n\n _module._modules[name].to(weight.device)" }, { "identifier": "monkeypatch_or_replace_lora_extended", "path": "utils/lora.py", "snippet": "def monkeypatch_or_replace_lora_extended(\n model,\n loras,\n target_replace_module=DEFAULT_TARGET_REPLACE,\n r: Union[int, List[int]] = 4,\n):\n for _module, name, _child_module in _find_modules(\n model,\n target_replace_module,\n search_class=[\n nn.Linear, \n nn.Conv2d, \n nn.Conv3d,\n LoraInjectedLinear, \n LoraInjectedConv2d, \n LoraInjectedConv3d,\n ],\n ):\n\n if (_child_module.__class__ == nn.Linear) or (\n _child_module.__class__ == LoraInjectedLinear\n ):\n if len(loras[0].shape) != 2:\n continue\n\n _source = (\n _child_module.linear\n if isinstance(_child_module, LoraInjectedLinear)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedLinear(\n _source.in_features,\n _source.out_features,\n _source.bias is not None,\n r=r.pop(0) if isinstance(r, list) else r,\n )\n _tmp.linear.weight = weight\n\n if bias is not None:\n _tmp.linear.bias = bias\n\n elif (_child_module.__class__ == nn.Conv2d) or (\n _child_module.__class__ == LoraInjectedConv2d\n ):\n if len(loras[0].shape) != 4:\n continue\n _source = (\n _child_module.conv\n if isinstance(_child_module, LoraInjectedConv2d)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedConv2d(\n _source.in_channels,\n _source.out_channels,\n _source.kernel_size,\n _source.stride,\n _source.padding,\n _source.dilation,\n _source.groups,\n _source.bias is not None,\n r=r.pop(0) if isinstance(r, list) else 
r,\n )\n\n _tmp.conv.weight = weight\n\n if bias is not None:\n _tmp.conv.bias = bias\n\n elif _child_module.__class__ == nn.Conv3d or(\n _child_module.__class__ == LoraInjectedConv3d\n ):\n\n if len(loras[0].shape) != 5:\n continue\n\n _source = (\n _child_module.conv\n if isinstance(_child_module, LoraInjectedConv3d)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedConv3d(\n _source.in_channels,\n _source.out_channels,\n bias=_source.bias is not None,\n kernel_size=_source.kernel_size,\n padding=_source.padding,\n r=r.pop(0) if isinstance(r, list) else r,\n )\n\n _tmp.conv.weight = weight\n\n if bias is not None:\n _tmp.conv.bias = bias\n\n # switch the module\n _module._modules[name] = _tmp\n\n up_weight = loras.pop(0)\n down_weight = loras.pop(0)\n\n _module._modules[name].lora_up.weight = nn.Parameter(\n up_weight.type(weight.dtype)\n )\n _module._modules[name].lora_down.weight = nn.Parameter(\n down_weight.type(weight.dtype)\n )\n\n _module._modules[name].to(weight.device)" }, { "identifier": "activate_lora_train", "path": "stable_lora/lora.py", "snippet": "def activate_lora_train(model, bias):\n def unfreeze():\n print(model.__class__.__name__ + \" LoRA set for training.\")\n return loralb.mark_only_lora_as_trainable(model, bias=bias)\n\n return unfreeze" }, { "identifier": "add_lora_to", "path": "stable_lora/lora.py", "snippet": "def add_lora_to(\n model, \n target_module=UNET_REPLACE, \n search_class=[torch.nn.Linear], \n r=32, \n dropout=0,\n lora_bias='none'\n):\n for module, name, child_module in find_modules(\n model, \n ancestor_class=target_module, \n search_class=search_class\n ):\n bias = hasattr(child_module, \"bias\")\n \n # Check if child module of the model has bias.\n if bias:\n if child_module.bias is None:\n bias = False\n\n # Check if the child module of the model is type Linear or Conv2d.\n if isinstance(child_module, torch.nn.Linear):\n l = create_lora_linear(child_module, r, dropout, bias=bias)\n\n if isinstance(child_module, torch.nn.Conv2d):\n l = create_lora_conv(child_module, r, dropout, bias=bias)\n\n if isinstance(child_module, torch.nn.Conv3d):\n l = create_lora_conv3d(child_module, r, dropout, bias=bias)\n\n if isinstance(child_module, torch.nn.Embedding):\n l = create_lora_emb(child_module, r)\n \n # If the model has bias and we wish to add it, use the child_modules in place\n if bias:\n l.bias = child_module.bias\n \n # Assign the frozen weight of model's Linear or Conv2d to the LoRA model.\n l.weight = child_module.weight\n\n # Replace the new LoRA model with the model's Linear or Conv2d module.\n module._modules[name] = l\n \n\n # Unfreeze only the newly added LoRA weights, but keep the model frozen.\n return activate_lora_train(model, lora_bias)" }, { "identifier": "save_lora", "path": "stable_lora/lora.py", "snippet": "def save_lora(\n unet=None, \n text_encoder=None, \n save_text_weights=False,\n output_dir=\"output\",\n lora_filename=\"lora.safetensors\",\n lora_bias='none', \n save_for_webui=True,\n only_webui=False,\n metadata=None,\n unet_dict_converter=None,\n text_dict_converter=None\n ):\n\n if not only_webui:\n # Create directory for the full LoRA weights.\n trainable_weights_dir = f\"{output_dir}/full_weights\"\n lora_out_file_full_weight = f\"{trainable_weights_dir}/{lora_filename}\"\n os.makedirs(trainable_weights_dir, exist_ok=True)\n\n ext = '.safetensors'\n # Create LoRA out filename.\n lora_out_file = f\"{output_dir}/webui_{lora_filename}{ext}\"\n\n if not only_webui:\n 
save_path_full_weights = lora_out_file_full_weight + ext\n\n save_path = lora_out_file\n\n if not only_webui:\n for i, model in enumerate([unet, text_encoder]):\n if save_text_weights and i == 1:\n non_webui_weights = save_path_full_weights.replace(ext, f\"_text_encoder{ext}\")\n\n else:\n non_webui_weights = save_path_full_weights.replace(ext, f\"_unet{ext}\")\n\n # Load only the LoRAs from the state dict.\n lora_dict = loralb.lora_state_dict(model, bias=lora_bias)\n \n # Save the models as fp32. This ensures we can finetune again without having to upcast. \n save_file(lora_dict, non_webui_weights)\n \n if save_for_webui:\n # Convert the keys to compvis model and webui\n unet_lora_dict = loralb.lora_state_dict(unet, bias=lora_bias) \n lora_dict_fp16 = unet_dict_converter(unet_lora_dict, strict_mapping=True)\n \n if save_text_weights:\n text_encoder_dict = loralb.lora_state_dict(text_encoder, bias=lora_bias)\n lora_dict_text_fp16 = text_dict_converter(text_encoder_dict)\n \n # Update the Unet dict to include text keys.\n lora_dict_fp16.update(lora_dict_text_fp16)\n\n # Cast tensors to fp16. It's assumed we won't be finetuning these.\n for k, v in lora_dict_fp16.items():\n lora_dict_fp16[k] = v.to(dtype=torch.float16)\n\n save_file(\n lora_dict_fp16, \n save_path, \n metadata=metadata\n )" }, { "identifier": "load_lora", "path": "stable_lora/lora.py", "snippet": "def load_lora(model, lora_path: str):\n try:\n if os.path.exists(lora_path):\n lora_dict = load_file(lora_path)\n model.load_state_dict(lora_dict, strict=False)\n\n except Exception as e:\n print(f\"Could not load your lora file: {e}\")" }, { "identifier": "set_mode_group", "path": "stable_lora/lora.py", "snippet": "def set_mode_group(models, train):\n for model in models: \n set_mode(model, train)\n model.train(train)" } ]
import os
import torch
import uuid
from logging import warnings
from typing import Union
from types import SimpleNamespace
from models.unet_3d_condition_mask import UNet3DConditionModel
from transformers import CLIPTextModel
from utils.convert_diffusers_to_original_ms_text_to_video import convert_unet_state_dict, convert_text_enc_state_dict_v20
from .lora import (
    extract_lora_ups_down,
    inject_trainable_lora_extended,
    save_lora_weight,
    train_patch_pipe,
    monkeypatch_or_replace_lora,
    monkeypatch_or_replace_lora_extended
)
from stable_lora.lora import (
    activate_lora_train,
    add_lora_to,
    save_lora,
    load_lora,
    set_mode_group
)
12304
self.handle_lora_load(lora_file, lora_loader_args) else: print(f"Could not load LoRAs for {model.__class__.__name__}. Injecting new ones instead...") except Exception as e: print(f"An error occured while loading a LoRA file: {e}") def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias): return_dict = lora_args.copy() if self.is_cloneofsimo_lora(): return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS) return_dict.update({ "model": model, "loras": self.get_lora_file_path(lora_path, model), "target_replace_module": replace_modules, "r": r }) if self.is_stable_lora(): KEYS = ['model', 'lora_path'] return_dict = filter_dict(return_dict, KEYS) return_dict.update({'model': model, 'lora_path': lora_path}) return return_dict def do_lora_injection( self, model, replace_modules, bias='none', dropout=0, r=4, lora_loader_args=None, ): REPLACE_MODULES = replace_modules params = None negation = None is_injection_hybrid = False if self.is_cloneofsimo_lora(): is_injection_hybrid = True injector_args = lora_loader_args params, negation = self.lora_injector(**injector_args) for _up, _down in extract_lora_ups_down( model, target_replace_module=REPLACE_MODULES): if all(x is not None for x in [_up, _down]): print(f"Lora successfully injected into {model.__class__.__name__}.") break return params, negation, is_injection_hybrid if self.is_stable_lora(): injector_args = lora_args.copy() injector_args = filter_dict(injector_args, keys=STABLE_LORA_KEYS) SEARCH_CLASS = [torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.Embedding] injector_args.update({ "model": model, "target_module": REPLACE_MODULES, "search_class": SEARCH_CLASS, "r": r, "dropout": dropout, "lora_bias": self.lora_bias }) activator = self.lora_injector(**injector_args) activator() return params, negation, is_injection_hybrid def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16): params = None negation = None lora_loader_args = self.get_lora_func_args( lora_path, use_lora, model, replace_modules, r, dropout, self.lora_bias ) if use_lora: params, negation, is_injection_hybrid = self.do_lora_injection( model, replace_modules, bias=self.lora_bias, lora_loader_args=lora_loader_args, dropout=dropout, r=r ) if not is_injection_hybrid: self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args) params = model if params is None else params return params, negation def deactivate_lora_train(self, models, deactivate=True): """ Usage: Use before and after sampling previews. Currently only available for Stable LoRA. """ if self.is_stable_lora(): set_mode_group(models, not deactivate) def save_cloneofsimo_lora(self, model, save_path, step):
FILE_BASENAMES = ['unet', 'text_encoder'] LORA_FILE_TYPES = ['.pt', '.safetensors'] CLONE_OF_SIMO_KEYS = ['model', 'loras', 'target_replace_module', 'r'] STABLE_LORA_KEYS = ['model', 'target_module', 'search_class', 'r', 'dropout', 'lora_bias'] lora_versions = dict( stable_lora = "stable_lora", cloneofsimo = "cloneofsimo" ) lora_func_types = dict( loader = "loader", injector = "injector" ) lora_args = dict( model = None, loras = None, target_replace_module = [], target_module = [], r = 4, search_class = [torch.nn.Linear], dropout = 0, lora_bias = 'none' ) LoraVersions = SimpleNamespace(**lora_versions) LoraFuncTypes = SimpleNamespace(**lora_func_types) LORA_VERSIONS = [LoraVersions.stable_lora, LoraVersions.cloneofsimo] LORA_FUNC_TYPES = [LoraFuncTypes.loader, LoraFuncTypes.injector] def filter_dict(_dict, keys=[]): if len(keys) == 0: assert "Keys cannot empty for filtering return dict." for k in keys: if k not in lora_args.keys(): assert f"{k} does not exist in available LoRA arguments" return {k: v for k, v in _dict.items() if k in keys} class LoraHandler(object): def __init__( self, version: LORA_VERSIONS = LoraVersions.cloneofsimo, use_unet_lora: bool = False, use_text_lora: bool = False, save_for_webui: bool = False, only_for_webui: bool = False, lora_bias: str = 'none', unet_replace_modules: list = ['UNet3DConditionModel'], text_encoder_replace_modules: list = ['CLIPEncoderLayer'] ): self.version = version self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader) self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector) self.lora_bias = lora_bias self.use_unet_lora = use_unet_lora self.use_text_lora = use_text_lora self.save_for_webui = save_for_webui self.only_for_webui = only_for_webui self.unet_replace_modules = unet_replace_modules self.text_encoder_replace_modules = text_encoder_replace_modules self.use_lora = any([use_text_lora, use_unet_lora]) if self.use_lora: print(f"Using LoRA Version: {self.version}") def is_cloneofsimo_lora(self): return self.version == LoraVersions.cloneofsimo def is_stable_lora(self): return self.version == LoraVersions.stable_lora def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader): if self.is_cloneofsimo_lora(): if func_type == LoraFuncTypes.loader: return monkeypatch_or_replace_lora_extended if func_type == LoraFuncTypes.injector: return inject_trainable_lora_extended if self.is_stable_lora(): if func_type == LoraFuncTypes.loader: return load_lora if func_type == LoraFuncTypes.injector: return add_lora_to assert "LoRA Version does not exist." 
def check_lora_ext(self, lora_file: str): return lora_file.endswith(tuple(LORA_FILE_TYPES)) def get_lora_file_path( self, lora_path: str, model: Union[UNet3DConditionModel, CLIPTextModel] ): if os.path.exists(lora_path): lora_filenames = [fns for fns in os.listdir(lora_path)] is_lora = self.check_lora_ext(lora_path) is_unet = isinstance(model, UNet3DConditionModel) is_text = isinstance(model, CLIPTextModel) idx = 0 if is_unet else 1 base_name = FILE_BASENAMES[idx] for lora_filename in lora_filenames: is_lora = self.check_lora_ext(lora_filename) if not is_lora: continue if base_name in lora_filename: return os.path.join(lora_path, lora_filename) return None def handle_lora_load(self, file_name:str, lora_loader_args: dict = None): self.lora_loader(**lora_loader_args) print(f"Successfully loaded LoRA from: {file_name}") def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,): try: lora_file = self.get_lora_file_path(lora_path, model) if lora_file is not None: lora_loader_args.update({"lora_path": lora_file}) self.handle_lora_load(lora_file, lora_loader_args) else: print(f"Could not load LoRAs for {model.__class__.__name__}. Injecting new ones instead...") except Exception as e: print(f"An error occured while loading a LoRA file: {e}") def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias): return_dict = lora_args.copy() if self.is_cloneofsimo_lora(): return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS) return_dict.update({ "model": model, "loras": self.get_lora_file_path(lora_path, model), "target_replace_module": replace_modules, "r": r }) if self.is_stable_lora(): KEYS = ['model', 'lora_path'] return_dict = filter_dict(return_dict, KEYS) return_dict.update({'model': model, 'lora_path': lora_path}) return return_dict def do_lora_injection( self, model, replace_modules, bias='none', dropout=0, r=4, lora_loader_args=None, ): REPLACE_MODULES = replace_modules params = None negation = None is_injection_hybrid = False if self.is_cloneofsimo_lora(): is_injection_hybrid = True injector_args = lora_loader_args params, negation = self.lora_injector(**injector_args) for _up, _down in extract_lora_ups_down( model, target_replace_module=REPLACE_MODULES): if all(x is not None for x in [_up, _down]): print(f"Lora successfully injected into {model.__class__.__name__}.") break return params, negation, is_injection_hybrid if self.is_stable_lora(): injector_args = lora_args.copy() injector_args = filter_dict(injector_args, keys=STABLE_LORA_KEYS) SEARCH_CLASS = [torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.Embedding] injector_args.update({ "model": model, "target_module": REPLACE_MODULES, "search_class": SEARCH_CLASS, "r": r, "dropout": dropout, "lora_bias": self.lora_bias }) activator = self.lora_injector(**injector_args) activator() return params, negation, is_injection_hybrid def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16): params = None negation = None lora_loader_args = self.get_lora_func_args( lora_path, use_lora, model, replace_modules, r, dropout, self.lora_bias ) if use_lora: params, negation, is_injection_hybrid = self.do_lora_injection( model, replace_modules, bias=self.lora_bias, lora_loader_args=lora_loader_args, dropout=dropout, r=r ) if not is_injection_hybrid: self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args) params = model if params is None else params return params, negation def deactivate_lora_train(self, models, 
deactivate=True): """ Usage: Use before and after sampling previews. Currently only available for Stable LoRA. """ if self.is_stable_lora(): set_mode_group(models, not deactivate) def save_cloneofsimo_lora(self, model, save_path, step):
def save_lora(model, name, condition, replace_modules, step, save_path):
11
2023-12-07 08:26:29+00:00
16k
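(Aside on the LoRA-handler snippet in the row above: its `filter_dict` validation uses bare `assert "<message>"` statements, which never fail because a non-empty string is truthy. A minimal corrected sketch of that helper, assuming the module-level `lora_args` defaults dict shown in the snippet, raises explicit exceptions instead:)

def filter_dict(_dict, keys=None):
    # Bare `assert "<message>"` is always truthy and never fires; raise instead.
    keys = keys or []
    if not keys:
        raise ValueError("Keys cannot be empty for filtering the returned dict.")
    for k in keys:
        if k not in lora_args:  # `lora_args` is the module-level defaults dict from the snippet
            raise KeyError(f"{k} does not exist in the available LoRA arguments.")
    return {k: v for k, v in _dict.items() if k in keys}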
octo-models/octo
scripts/finetune.py
[ { "identifier": "make_single_dataset", "path": "octo/data/dataset.py", "snippet": "def make_single_dataset(\n dataset_kwargs: dict,\n *,\n train: bool,\n traj_transform_kwargs: dict = {},\n frame_transform_kwargs: dict = {},\n) -> dl.DLataset:\n \"\"\"Creates a single dataset from kwargs. Returns a dataset of trajectories.\n\n Args:\n dataset_kwargs: kwargs passed to `make_dataset_from_rlds` that are dataset-specific.\n train: whether this is a training or validation dataset.\n traj_transform_kwargs: kwargs passed to 'apply_trajectory_transforms'.\n frame_transform_kwargs: kwargs passed to 'get_frame_transforms'.\n \"\"\"\n dataset, dataset_statistics = make_dataset_from_rlds(\n **dataset_kwargs,\n train=train,\n )\n dataset = apply_trajectory_transforms(dataset, **traj_transform_kwargs, train=train)\n dataset = apply_frame_transforms(dataset, **frame_transform_kwargs, train=train)\n\n # this seems to reduce memory usage without affecting speed\n dataset = dataset.with_ram_budget(1)\n\n # save for later\n dataset.dataset_statistics = dataset_statistics\n return dataset" }, { "identifier": "OctoModel", "path": "octo/model/octo_model.py", "snippet": "class OctoModel:\n \"\"\"Recommended way of interacting with Octo models.\n\n Usage for inference:\n\n >>> model = OctoModel.load_pretrained(checkpoint_dir)\n >>> tasks = model.create_tasks(texts=[\"go to the red room\"])\n >>> # or tasks = model.create_tasks(goals={\"image_primary\": goal_images})\n >>> actions = model.sample_actions(observations, tasks, rng=jax.random.PRNGKey(0))\n >>> # Note: these are normalized actions (processed to mean 0 and std 1). To get the raw actions,\n # un-normalize them using model.dataset_statistics\n\n Usage for finetuning:\n\n >>> model = OctoModel.load_pretrained(checkpoint_dir)\n >>> train_state = octo.utils.train_utils.TrainState.create(\n rng=jax.random.PRNGKey(0),\n model=model,\n tx=optax.adamw(...)\n )\n >>> # access params through train_state.model.params\n >>> train_state, metrics = your_update_function(train_state, batch)\n >>> # when it's time to save (note that this only saves the model parameters,\n >>> # not the full optimizer state)\n >>> train_state.model.save_pretrained(step, save_dir)\n\n Usage for pretraining:\n\n >>> model = OctoModel.from_config(\n config,\n example_batch,\n text_processor\n ) # initializes params\n >>> # Continue as in finetuning example\n\n See full usage examples in train.py and finetune.py.\n\n \"\"\"\n\n module: OctoModule = struct.field(pytree_node=False)\n text_processor: TextProcessor = struct.field(pytree_node=False)\n config: Config = struct.field(pytree_node=False)\n params: Params\n example_batch: Data\n dataset_statistics: Optional[Data]\n\n def create_tasks(\n self, goals: Optional[Data] = None, texts: Optional[Sequence[str]] = None\n ):\n \"\"\"Creates tasks dict from goals and texts.\n\n Args:\n goals: if not None, dict of arrays with shape (batch_size, *)\n texts: if not None, list of texts of length batch_size\n\n Omit images to run the language-conditioned model, and omit texts to run the\n goal-conditioned model.\n \"\"\"\n assert goals is not None or texts is not None\n tasks = {\"pad_mask_dict\": {}}\n if goals is not None:\n tasks.update(goals)\n tasks[\"pad_mask_dict\"].update(\n {k: np.ones(v.shape[:1], dtype=bool) for k, v in goals.items()}\n )\n else:\n batch_size = len(texts)\n tasks.update(\n {\n k: np.zeros((batch_size, *v.shape[1:]), dtype=v.dtype)\n for k, v in self.example_batch[\"task\"].items()\n if k not in (\"pad_mask_dict\", 
\"language_instruction\")\n }\n )\n tasks[\"pad_mask_dict\"].update(\n {\n k: np.zeros(batch_size, dtype=bool)\n for k in tasks.keys()\n if k != \"pad_mask_dict\"\n }\n )\n\n if texts is not None:\n assert self.text_processor is not None\n tasks[\"language_instruction\"] = texts\n tasks[\"pad_mask_dict\"][\"language_instruction\"] = np.ones(\n len(texts), dtype=bool\n )\n else:\n batch_size = jax.tree_leaves(goals)[0].shape[0]\n tasks[\"language_instruction\"] = [\"\"] * batch_size\n tasks[\"pad_mask_dict\"][\"language_instruction\"] = np.zeros(\n batch_size, dtype=bool\n )\n\n if self.text_processor is not None:\n tasks[\"language_instruction\"] = self.text_processor.encode(\n tasks[\"language_instruction\"]\n )\n else:\n del tasks[\"language_instruction\"]\n\n _verify_shapes(tasks, \"tasks\", self.example_batch[\"task\"], starting_dim=1)\n return tasks\n\n @partial(jax.jit, static_argnames=(\"train\",))\n def run_transformer(\n self, observations: Data, tasks: Data, pad_mask: ArrayLike, train: bool = False\n ):\n \"\"\"Runs the transformer, but does shape checking on the inputs.\n\n Args:\n observations: dictionary of arrays of shape (batch_size, window_size, *shape).\n Shape must be consistent with self.example_batch[\"observation\"]\n tasks: dict of tasks of shape (batch_size, *shape)\n Shape must be consistent with self.example_batch[\"task\"]\n pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding\n train: whether to run in train mode\n \"\"\"\n _verify_shapes(\n observations,\n \"observations\",\n self.example_batch[\"observation\"],\n starting_dim=2,\n )\n _verify_shapes(tasks, \"tasks\", self.example_batch[\"task\"], starting_dim=1)\n\n return self.module.apply(\n {\"params\": self.params},\n observations,\n tasks,\n pad_mask,\n train=train,\n method=\"octo_transformer\",\n )\n\n @partial(jax.jit, static_argnames=(\"train\", \"sample_shape\", \"argmax\"))\n def sample_actions(\n self,\n observations: Data,\n tasks: Data,\n pad_mask: Optional[ArrayLike] = None,\n train: bool = False,\n argmax: bool = False,\n sample_shape: Tuple[int, ...] = (),\n rng: Optional[PRNGKey] = None,\n temperature: float = 1.0,\n ):\n \"\"\"Samples actions from the model. See `action_heads.py` for more info.\n\n Args:\n observations: dictionary of arrays of shape (batch_size, window_size, *)\n tasks: dict of tasks of shape (batch_size, *)\n pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding\n train: whether to run in train mode\n ...see `action_heads.py` for the rest of the kwargs.\n Returns:\n actions: (*sample_shape, batch_size, pred_horizon, action_dim)\n \"\"\"\n if pad_mask is None:\n pad_mask = observations[\"pad_mask\"]\n\n transformer_outputs = self.run_transformer(\n observations, tasks, pad_mask, train=train\n )\n action_head: ActionHead = self.module.bind({\"params\": self.params}).heads[\n \"action\"\n ]\n return action_head.predict_action(\n transformer_outputs,\n train=train,\n argmax=argmax,\n sample_shape=sample_shape,\n rng=rng,\n temperature=temperature,\n )\n\n @classmethod\n def load_pretrained(\n cls,\n checkpoint_path: str,\n step: Optional[int] = None,\n ) -> \"OctoModel\":\n \"\"\"Loads a model from a checkpoint that was saved via `save_pretrained`.\n\n Args:\n checkpoint_path (str): A path to either a directory of checkpoints or a single checkpoint.\n step (int, optional): If multiple checkpoints are present, which one to load. 
Defaults to the latest.\n \"\"\"\n if checkpoint_path.startswith(\"hf://\"):\n if step:\n raise ValueError(\n \"You can't set config['pretrained_step'] when loading from HuggingFace.\"\n )\n checkpoint_path = _download_from_huggingface(\n checkpoint_path.removeprefix(\"hf://\")\n )\n\n # load config\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"config.json\"), \"r\"\n ) as f:\n config = json.load(f)\n\n # load example batch\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"example_batch.msgpack\"), \"rb\"\n ) as f:\n example_batch = flax.serialization.msgpack_restore(f.read())\n # shim for migrating from \"tasks\" to \"task\"\n if \"tasks\" in example_batch:\n example_batch[\"task\"] = example_batch.pop(\"tasks\")\n\n logging.debug(\n \"Model was trained with observations: %s\",\n flax.core.pretty_repr(\n jax.tree_map(jnp.shape, example_batch[\"observation\"])\n ),\n )\n logging.debug(\n \"Model was trained with tasks: %s\",\n flax.core.pretty_repr(jax.tree_map(jnp.shape, example_batch[\"task\"])),\n )\n\n # load dataset statistics\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"dataset_statistics.json\"), \"r\"\n ) as f:\n dataset_statistics = json.load(f)\n dataset_statistics = jax.tree_map(\n np.array, dataset_statistics, is_leaf=lambda x: not isinstance(x, dict)\n )\n\n # create model def (an OctoModule)\n module = OctoModule.create(**config[\"model\"])\n # infer params shape without actually doing any computation\n params_shape = jax.eval_shape(\n partial(module.init, train=False),\n jax.random.PRNGKey(0),\n example_batch[\"observation\"],\n example_batch[\"task\"],\n example_batch[\"observation\"][\"pad_mask\"],\n )[\"params\"]\n # restore params, checking to make sure the shape matches\n checkpointer = orbax.checkpoint.CheckpointManager(\n checkpoint_path, orbax.checkpoint.PyTreeCheckpointer()\n )\n step = step if step is not None else checkpointer.latest_step()\n params = checkpointer.restore(step, params_shape)\n\n if config[\"text_processor\"] is not None:\n text_processor = ModuleSpec.instantiate(config[\"text_processor\"])()\n else:\n text_processor = None\n\n return cls(\n module=module,\n params=params,\n text_processor=text_processor,\n example_batch=example_batch,\n config=config,\n dataset_statistics=dataset_statistics,\n )\n\n def save_pretrained(\n self,\n step: int,\n checkpoint_path: Optional[str] = None,\n checkpoint_manager: Optional[orbax.checkpoint.CheckpointManager] = None,\n ):\n \"\"\"Saves a model, as well as corresponding metadata needed for `load_pretrained`. Takes either a\n pre-existing checkpoint manager (which already knows where to save the checkpoint) or a path to a\n directory to save the checkpoint to.\n\n Args:\n step (int): Step number.\n checkpoint_path (str, optional): Path to save the checkpoint.\n checkpoint_manager (optional): Checkpoint manager to save the checkpoint.\n params (optional): Params to save. 
If None, uses self.params.\n \"\"\"\n if (checkpoint_path is None) == (checkpoint_manager is None):\n raise ValueError(\n \"Must provide exactly one of checkpoint_path or checkpoint_manager.\"\n )\n if checkpoint_manager is None:\n checkpoint_manager = orbax.checkpoint.CheckpointManager(\n checkpoint_path, orbax.checkpoint.PyTreeCheckpointer()\n )\n if checkpoint_path is None:\n checkpoint_path = str(checkpoint_manager._directory)\n\n # save params\n checkpoint_manager.save(\n step,\n self.params,\n {\"save_args\": orbax_utils.save_args_from_target(self.params)},\n )\n\n if jax.process_index() == 0:\n # save config\n config_path = tf.io.gfile.join(checkpoint_path, \"config.json\")\n if not tf.io.gfile.exists(config_path):\n with tf.io.gfile.GFile(config_path, \"w\") as f:\n json.dump(self.config, f)\n\n # save example batch\n example_batch_path = tf.io.gfile.join(\n checkpoint_path, \"example_batch.msgpack\"\n )\n if not tf.io.gfile.exists(example_batch_path):\n with tf.io.gfile.GFile(example_batch_path, \"wb\") as f:\n f.write(flax.serialization.msgpack_serialize(self.example_batch))\n\n # save dataset statistics\n dataset_statistics_path = tf.io.gfile.join(\n checkpoint_path, \"dataset_statistics.json\"\n )\n if not tf.io.gfile.exists(dataset_statistics_path):\n with tf.io.gfile.GFile(dataset_statistics_path, \"w\") as f:\n json.dump(\n jax.tree_map(lambda x: x.tolist(), self.dataset_statistics),\n f,\n )\n\n @classmethod\n def from_config(\n cls,\n config: Config,\n example_batch: Data,\n text_processor: Optional[Any] = None,\n verbose: bool = False,\n rng: Optional[PRNGKey] = None,\n dataset_statistics: Optional[Data] = None,\n ):\n \"\"\"Initializes a model with a fresh set of weights from a given config + example_batch.\n\n Args:\n config (Dict[str, Any]): Config dict. 
The only required key is \"model\", but other configuration\n may be saved for posterity.\n example_batch (Dict[str, Any]): Example batch.\n text_processor (Any, optional): Preprocessor for text inputs.\n verbose (bool, optional): Whether to print out a summary of the model.\n rng (Optional[PRNGKey], optional): RNG key for initializing the model.\n dataset_statistics (Optional[Dict[str, Any]], optional): Dataset statistics.\n \"\"\"\n module = OctoModule.create(**config[\"model\"])\n rng = rng if rng is not None else jax.random.PRNGKey(0)\n example_batch = multihost_utils.process_allgather(example_batch)\n example_batch = jax.tree_map(lambda x: x[:1], example_batch)\n\n init_args = (\n example_batch[\"observation\"],\n example_batch[\"task\"],\n example_batch[\"observation\"][\"pad_mask\"],\n )\n\n if verbose:\n print(\n module.tabulate(rng, *init_args, train=False, verbose=True, depth=2)\n ) # Prints out the parameter count of our model, and tokenizer details\n\n @jax.jit\n def _init(rng):\n return module.init(rng, *init_args, train=False)\n\n params = _init(rng)[\"params\"]\n\n return cls(\n module=module,\n params=params,\n text_processor=text_processor,\n example_batch=example_batch,\n config=config,\n dataset_statistics=dataset_statistics,\n )\n\n def get_pretty_spec(self):\n \"\"\"Brief summary of the model's expected inputs and outputs.\"\"\"\n # TODO: generalize this to print out proprio when it is being tokenized\n window_size = self.example_batch[\"observation\"][\"pad_mask\"].shape[1]\n\n observation_space = {\n k: (\"batch\", \"history_window\", *v.shape[2:])\n for k, v in self.example_batch[\"observation\"].items()\n if k.startswith(\"image\")\n }\n task_space = {\n k: (\"batch\", *v.shape[1:])\n for k, v in self.example_batch[\"task\"].items()\n if k.startswith(\"image\")\n }\n if self.text_processor is not None:\n task_space[\"language_instruction\"] = jax.tree_map(\n lambda arr: (\"batch\", *arr.shape[1:]),\n self.example_batch[\"task\"][\"language_instruction\"],\n )\n\n try:\n action_head = self.module.heads[\"action\"]\n action_head_repr = str(action_head.__class__)\n action_dim, pred_horizon = action_head.action_dim, action_head.pred_horizon\n except:\n action_head_repr, action_dim, pred_horizon = \"\", None, None\n\n return SPEC_TEMPLATE.format(\n window_size=window_size,\n observation_space=flax.core.pretty_repr(observation_space),\n task_space=flax.core.pretty_repr(task_space),\n action_head_repr=action_head_repr,\n action_dim=action_dim,\n pred_horizon=pred_horizon,\n )" }, { "identifier": "initialize_compilation_cache", "path": "octo/utils/jax_utils.py", "snippet": "def initialize_compilation_cache(\n cache_dir=os.path.expanduser(\"~/.jax_compilation_cache\"),\n):\n \"\"\"Initializes the Jax persistent compilation cache.\"\"\"\n compilation_cache.initialize_cache(cache_dir)\n for logger in [logging.getLogger(name) for name in logging.root.manager.loggerDict]:\n logger.addFilter(\n lambda record: \"Not writing persistent cache entry for\"\n not in record.getMessage()\n )" }, { "identifier": "ModuleSpec", "path": "octo/utils/spec.py", "snippet": "class ModuleSpec(TypedDict):\n \"\"\"A JSON-serializable representation of a function or class with some default args and kwargs to pass to\n it. 
Useful for specifying a particular class or function in a config file, while keeping it serializable\n and overridable from the command line using ml_collections.\n\n Usage:\n\n # Preferred way to create a spec:\n >>> from octo.model.components.transformer import Transformer\n >>> spec = ModuleSpec.create(Transformer, num_layers=3)\n # Same as above using the fully qualified import string:\n >>> spec = ModuleSpec.create(\"octo.model.components.transformer:Transformer\", num_layers=3)\n\n # Usage:\n >>> ModuleSpec.instantiate(spec) == partial(Transformer, num_layers=3)\n # can pass additional kwargs at instantiation time\n >>> transformer = ModuleSpec.instantiate(spec, num_heads=8)\n\n Note: ModuleSpec is just an alias for a dictionary (that is strongly typed), not a real class. So from\n your code's perspective, it is just a dictionary.\n\n module (str): The module the callable is located in\n name (str): The name of the callable in the module\n args (tuple): The args to pass to the callable\n kwargs (dict): The kwargs to pass to the callable\n \"\"\"\n\n module: str\n name: str\n args: Tuple[Any, ...]\n kwargs: Dict[str, Any]\n\n @staticmethod\n def create(callable_or_full_name: Union[str, callable], *args, **kwargs) -> \"ModuleSpec\": # type: ignore\n \"\"\"Create a module spec from a callable or import string.\n\n Args:\n callable_or_full_name (str or object): Either the object itself or a fully qualified import string\n (e.g. \"octo.model.components.transformer:Transformer\")\n args (tuple, optional): Passed into callable upon instantiation.\n kwargs (dict, optional): Passed into callable upon instantiation.\n \"\"\"\n if isinstance(callable_or_full_name, str):\n assert callable_or_full_name.count(\":\") == 1, (\n \"If passing in a string, it must be a fully qualified import string \"\n \"(e.g. 'octo.model.components.transformer:Transformer')\"\n )\n module, name = callable_or_full_name.split(\":\")\n else:\n module, name = _infer_full_name(callable_or_full_name)\n\n return ModuleSpec(module=module, name=name, args=args, kwargs=kwargs)\n\n @staticmethod\n def instantiate(spec: \"ModuleSpec\"): # type: ignore\n if set(spec.keys()) != {\"module\", \"name\", \"args\", \"kwargs\"}:\n raise ValueError(\n f\"Expected ModuleSpec, but got {spec}. 
\"\n \"ModuleSpec must have keys 'module', 'name', 'args', and 'kwargs'.\"\n )\n cls = _import_from_string(spec[\"module\"], spec[\"name\"])\n return partial(cls, *spec[\"args\"], **spec[\"kwargs\"])" }, { "identifier": "RolloutVisualizationCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class RolloutVisualizationCallback(Callback):\n visualizer_kwargs_list: Sequence[Mapping[str, Any]]\n text_processor: TextProcessor\n trajs_for_rollouts: int\n model_pred_horizon: int\n history_length: int\n modes_to_evaluate: str = (\"text_conditioned\", \"image_conditioned\")\n\n def __post_init__(self):\n self.zero_text = jax.tree_map(lambda x: x[0], self.text_processor.encode(\"\"))\n\n self.rollout_visualizers = [\n RolloutVisualizer(\n text_processor=self.text_processor,\n history_length=self.history_length,\n action_chunk=self.model_pred_horizon\n if \"pred_horizon\" not in kwargs\n else kwargs[\"pred_horizon\"],\n **kwargs,\n )\n for kwargs in self.visualizer_kwargs_list\n ]\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n modal_policy_fns = {\n mode: partial(\n get_policy_sampled_actions,\n train_state,\n zero_text=self.zero_text,\n samples_per_state=1,\n policy_mode=mode,\n )\n for mode in self.modes_to_evaluate\n }\n for rollout_visualizer in self.rollout_visualizers:\n for mode, policy_fn in modal_policy_fns.items():\n logging.info(f\"Running rollouts for {rollout_visualizer.env_name}\")\n rollout_infos = rollout_visualizer.run_rollouts(\n policy_fn, n_rollouts=self.trajs_for_rollouts\n )\n wandb_metrics[\n f\"rollouts_{rollout_visualizer.env_name}_chunk{rollout_visualizer.action_chunk}/{mode}\"\n ] = rollout_infos\n\n return wandb_metrics" }, { "identifier": "SaveCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class SaveCallback(Callback):\n \"\"\"Callback that saves checkpoints to `save_dir`. 
If `save_dir` is None, does nothing.\"\"\"\n\n save_dir: Optional[str]\n\n def __post_init__(self):\n if self.save_dir is not None:\n if not self.save_dir.startswith(\"gs://\"):\n self.save_dir = os.path.abspath(self.save_dir)\n if jax.process_index() == 0:\n tf.io.gfile.makedirs(self.save_dir)\n logging.info(f\"Created {self.save_dir}\")\n # make checkpointers\n # only keep latest full TrainState\n self.state_checkpointer = orbax.checkpoint.CheckpointManager(\n tf.io.gfile.join(self.save_dir, \"state\"),\n orbax.checkpoint.PyTreeCheckpointer(),\n options=orbax.checkpoint.CheckpointManagerOptions(\n max_to_keep=1,\n ),\n )\n # keep every params checkpoint\n self.params_checkpointer = orbax.checkpoint.CheckpointManager(\n self.save_dir,\n orbax.checkpoint.PyTreeCheckpointer(),\n )\n\n def __call__(self, train_state: TrainState, step: int):\n if self.save_dir is not None:\n train_state.model.save_pretrained(\n step, checkpoint_manager=self.params_checkpointer\n )\n self.state_checkpointer.save(\n step,\n train_state,\n {\"save_args\": orbax_utils.save_args_from_target(train_state)},\n )" }, { "identifier": "ValidationCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class ValidationCallback(Callback):\n loss_fn: Callable\n process_batch_fn: Callable[[Data], Data]\n text_processor: Optional[TextProcessor]\n val_dataset_kwargs_list: Sequence[Mapping[str, Any]]\n dataset_kwargs: Mapping[str, Any]\n val_shuffle_buffer_size: int\n num_val_batches: int\n modes_to_evaluate: Sequence[str] = (\"text_conditioned\", \"image_conditioned\")\n train: bool = False\n\n def __post_init__(self):\n if self.text_processor is not None:\n self.zero_text = jax.tree_map(\n lambda x: x[0], self.text_processor.encode(\"\")\n )\n self.val_iterators = {}\n for single_dataset_kwargs in self.val_dataset_kwargs_list:\n val_dataset = create_validation_dataset(\n single_dataset_kwargs,\n self.dataset_kwargs[\"traj_transform_kwargs\"],\n self.dataset_kwargs[\"frame_transform_kwargs\"],\n train=self.train,\n )\n val_iterator = (\n val_dataset.unbatch()\n .shuffle(self.val_shuffle_buffer_size)\n .repeat()\n .batch(self.dataset_kwargs[\"batch_size\"])\n .iterator(prefetch=0)\n )\n val_iterator = map(self.process_batch_fn, val_iterator)\n self.val_iterators[single_dataset_kwargs[\"name\"]] = val_iterator\n\n @partial(\n jax.jit,\n out_shardings=jax.sharding.PositionalSharding(jax.devices()).replicate(),\n )\n def eval_step(state: TrainState, batch: Data):\n loss_fn_partial = partial(\n self.loss_fn,\n params=state.model.params,\n rng=state.rng,\n train=False,\n )\n all_tasks = {}\n\n if \"base\" in self.modes_to_evaluate:\n all_tasks[\"base\"] = batch[\"task\"]\n if \"image_conditioned\" in self.modes_to_evaluate:\n all_tasks[\"image_conditioned\"] = remove_text(\n batch[\"task\"], self.zero_text\n )\n if \"text_conditioned\" in self.modes_to_evaluate:\n all_tasks[\"text_conditioned\"] = remove_images(batch[\"task\"])\n\n if \"unconditioned\" in self.modes_to_evaluate:\n all_tasks[\"unconditioned\"] = remove_text(\n remove_images(batch[\"task\"]), self.zero_text\n )\n return {\n k: loss_fn_partial(batch=flax.core.copy(batch, {\"task\": tasks}))[1]\n for k, tasks in all_tasks.items()\n }\n\n self.eval_step = eval_step\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n for name, val_data_iter in self.val_iterators.items():\n metrics = []\n for _, batch in tqdm.tqdm(\n zip(range(self.num_val_batches), val_data_iter),\n total=self.num_val_batches,\n desc=name,\n ):\n 
metrics.append(self.eval_step(train_state, batch))\n metrics = jax.tree_map(lambda *xs: np.mean(xs), *metrics)\n wandb_metrics[f\"validation_{name}\"] = metrics\n return wandb_metrics" }, { "identifier": "VisualizationCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class VisualizationCallback(Callback):\n text_processor: TextProcessor\n val_dataset_kwargs_list: Sequence[Mapping[str, Any]]\n dataset_kwargs: Mapping[str, Any]\n eval_batch_size: int\n trajs_for_metrics: int\n trajs_for_viz: int\n samples_per_state: int\n modes_to_evaluate: str = (\"text_conditioned\", \"image_conditioned\")\n train: bool = False\n\n def __post_init__(self):\n self.zero_text = jax.tree_map(lambda x: x[0], self.text_processor.encode(\"\"))\n\n self.visualizers = {}\n for single_dataset_kwargs in self.val_dataset_kwargs_list:\n val_dataset = create_validation_dataset(\n single_dataset_kwargs,\n self.dataset_kwargs[\"traj_transform_kwargs\"],\n self.dataset_kwargs[\"frame_transform_kwargs\"],\n train=self.train,\n )\n self.visualizers[single_dataset_kwargs[\"name\"]] = Visualizer(\n val_dataset, text_processor=self.text_processor, freeze_trajs=False\n )\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n modal_policy_fns = {\n mode: batched_apply(\n partial(\n get_policy_sampled_actions,\n train_state,\n zero_text=self.zero_text,\n samples_per_state=self.samples_per_state,\n policy_mode=mode,\n ),\n self.eval_batch_size,\n )\n for mode in self.modes_to_evaluate\n }\n\n for name, visualizer in self.visualizers.items():\n for mode, policy_fn in modal_policy_fns.items():\n if self.trajs_for_metrics > 0:\n raw_infos = visualizer.raw_evaluations(\n policy_fn, max_trajs=self.trajs_for_metrics\n )\n metrics = visualizer.metrics_for_wandb(raw_infos)\n wandb_metrics[f\"offline_metrics_{name}/{mode}\"] = metrics\n if self.trajs_for_viz > 0:\n images = visualizer.visualize_for_wandb(\n policy_fn, max_trajs=self.trajs_for_viz\n )\n wandb_metrics[f\"visualizations_{name}/{mode}\"] = images\n return wandb_metrics" }, { "identifier": "check_config_diff", "path": "octo/utils/train_utils.py", "snippet": "def check_config_diff(new_conf: Config, old_conf: Config, silent: bool = False):\n \"\"\"Checks for differences between new config and old config dicts.\"\"\"\n new_conf_flat = flax.traverse_util.flatten_dict(\n new_conf.to_dict() if isinstance(new_conf, ConfigDict) else new_conf\n )\n old_conf_flat = flax.traverse_util.flatten_dict(\n old_conf.to_dict() if isinstance(old_conf, ConfigDict) else old_conf\n )\n\n # check for missing / new keys\n if set(new_conf_flat.keys()) != set(old_conf_flat.keys()) and not silent:\n logging.info(\n \"New config contains extra items: %s\",\n set(new_conf_flat.keys()) - set(old_conf_flat.keys()),\n )\n logging.info(\n \"New config doesn't contain items: %s\",\n set(old_conf_flat.keys()) - set(new_conf_flat.keys()),\n )\n\n # print differing key values\n mismatched_keys = {\n k: (new_conf_flat[k], old_conf_flat[k])\n for k in new_conf_flat\n if k in old_conf_flat and new_conf_flat[k] != old_conf_flat[k]\n }\n if mismatched_keys and not silent:\n logging.info(\n \"New config contains keys with new values: %s\",\n flax.core.pretty_repr(mismatched_keys),\n )\n return mismatched_keys or (set(new_conf_flat.keys()) != set(old_conf_flat.keys()))" }, { "identifier": "create_optimizer", "path": "octo/utils/train_utils.py", "snippet": "def create_optimizer(\n params_or_params_shape: Params, **kwargs: dict\n) -> optax.GradientTransformation:\n \"\"\"Creates 
optimizer for Octo.\n\n kwargs are the kwargs for optax.adamw; if the \"learning_rate\" key is a dict, it is interpreted\n as the kwargs for create_lr_schedule (see above), otherwise it is interpreted as a constant\n learning rate.\n\n If clip_gradient is specified, then gradient clipping is applied. If frozen_keys is specified,\n then those parameters are frozen (i.e. not updated) during training.\n\n Returns:\n tx: an Optax optimizer\n lr_callable: Function that takes the current step and returns the learning rate\n \"\"\"\n if isinstance(kwargs[\"learning_rate\"], dict):\n lr_callable = create_lr_schedule(**kwargs[\"learning_rate\"])\n else:\n lr_callable = lambda _: kwargs[\"learning_rate\"]\n kwargs[\"learning_rate\"] = lr_callable\n\n # Following ViT, timm, MAE: this mask skips weight decay on biases and LayerNorm parameters\n wd_mask = jax.tree_util.tree_map_with_path(\n lambda path, x: \"kernel\" in jax.tree_util.keystr(path), params_or_params_shape\n )\n\n clip_gradient = kwargs.pop(\"clip_gradient\", None)\n frozen_keys = kwargs.pop(\"frozen_keys\", None)\n grad_accumulation_steps = kwargs.pop(\"grad_accumulation_steps\", None)\n\n tx = optax.adamw(mu_dtype=jnp.bfloat16, **kwargs, mask=wd_mask)\n if grad_accumulation_steps:\n tx = optax.MultiSteps(tx, grad_accumulation_steps)\n if clip_gradient is not None:\n tx = optax.chain(\n optax.clip_by_global_norm(clip_gradient),\n tx,\n )\n\n if frozen_keys:\n tx, param_partitions = freeze_weights(\n tx, params_or_params_shape, frozen_keys, return_partitions=True\n )\n zero_frozen_params = lambda params: jax.tree_map(\n lambda x, y: x if y == \"trainable\" else jnp.zeros(()),\n params,\n param_partitions,\n )\n param_norm_callable = lambda params: optax.global_norm(\n zero_frozen_params(params)\n )\n else:\n param_norm_callable = optax.global_norm\n\n return tx, lr_callable, param_norm_callable" }, { "identifier": "format_name_with_config", "path": "octo/utils/train_utils.py", "snippet": "def format_name_with_config(name, config):\n \"\"\"Formats a name string with a config dict.\n\n Formatting keys may be specified as {key} or {full_path_to_key_with_underscores}.\n\n Example:\n name = \"model_{model_type}_{model_size}\"\n config = {\"model_type\": \"transformer\", \"model_size\": \"small\"}\n format_name_with_config(name, config) -> \"model_transformer_small\"\n \"\"\"\n config_flat = flax.traverse_util.flatten_dict(config, sep=\"_\")\n config_final = {k.split(\"_\")[-1]: v for k, v in config_flat.items()}\n format_dict = {**config_final, **config_flat}\n return name.format(**format_dict)" }, { "identifier": "merge_params", "path": "octo/utils/train_utils.py", "snippet": "def merge_params(target_params: Params, pretrained_params: Params) -> Params:\n \"\"\"Copies pre-trained params into target_params for every param that has corresponding key + shape.\"\"\"\n flat_target_params = flax.traverse_util.flatten_dict(target_params)\n flat_pretrained_params = flax.traverse_util.flatten_dict(pretrained_params)\n keys_to_update = [\n k\n for k in flat_target_params\n if k in flat_pretrained_params\n and flat_target_params[k].shape == flat_pretrained_params[k].shape\n ]\n missing_keys = [k for k in flat_target_params if k not in flat_pretrained_params]\n shape_mismatch_keys = [\n k\n for k in flat_target_params\n if k in flat_pretrained_params\n and flat_target_params[k].shape != flat_pretrained_params[k].shape\n ]\n\n for key in keys_to_update:\n logging.debug(f\"Param copied from pre-trained: {'.'.join(key)}\")\n if missing_keys or 
shape_mismatch_keys:\n logging.info(\"########## Parameters skipped during model loading: ##########\")\n for key in missing_keys:\n logging.info(\n f\"Param missing in pre-trained model, skipping: {'.'.join(key)}\"\n )\n for key in shape_mismatch_keys:\n logging.info(\n f\"Param with differing shape in pre-trained model, skipping: {'.'.join(key)}\"\n )\n\n flat_target_params = flax.core.copy(\n flat_target_params, {k: flat_pretrained_params[k] for k in keys_to_update}\n )\n target_params = flax.traverse_util.unflatten_dict(flat_target_params)\n return target_params" }, { "identifier": "process_text", "path": "octo/utils/train_utils.py", "snippet": "def process_text(batch: Data, text_processor: Optional[TextProcessor]) -> Data:\n \"\"\"Encodes the language instruction inside the tasks for a batch.\n\n If the text processor is None, removes language entirely from the tasks.\n Expects batch to be a nested dictionary, where\n batch[\"task\"][\"language_instruction\"] is a sequence of byte strings\n \"\"\"\n if text_processor is None:\n batch[\"task\"].pop(\"language_instruction\")\n else:\n batch[\"task\"][\"language_instruction\"] = text_processor.encode(\n [s.decode(\"utf-8\") for s in batch[\"task\"][\"language_instruction\"]]\n )\n return batch" }, { "identifier": "Timer", "path": "octo/utils/train_utils.py", "snippet": "class Timer:\n \"\"\"\n Timer utility. Usage:\n\n timer = Timer()\n with timer(\"foo\"):\n do_something()\n\n timer.tick(\"bar\")\n do_something_else()\n timer.tock(\"bar\")\n\n timer.get_average_times() -> {\"foo\": 0.1, \"bar\": 0.2}\n \"\"\"\n\n def __init__(self):\n self.reset()\n\n @contextmanager\n def __call__(self, key):\n self.tick(key)\n try:\n yield None\n finally:\n self.tock(key)\n\n def reset(self):\n self.counts = defaultdict(int)\n self.times = defaultdict(float)\n self.start_times = {}\n\n def tick(self, key):\n if key in self.start_times:\n raise ValueError(f\"Timer is already ticking for key: {key}\")\n self.start_times[key] = time.time()\n\n def tock(self, key):\n if key not in self.start_times:\n raise ValueError(f\"Timer is not ticking for key: {key}\")\n self.counts[key] += 1\n self.times[key] += time.time() - self.start_times[key]\n del self.start_times[key]\n\n def get_average_times(self, reset=True):\n ret = {key: self.times[key] / self.counts[key] for key in self.counts}\n if reset:\n self.reset()\n return ret" }, { "identifier": "TrainState", "path": "octo/utils/train_utils.py", "snippet": "class TrainState:\n rng: PRNGKey\n model: OctoModel\n step: int\n opt_state: optax.OptState\n tx: optax.GradientTransformation = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n rng: PRNGKey,\n model: OctoModel,\n tx: optax.GradientTransformation,\n ):\n opt_state = tx.init(model.params)\n return cls(\n rng=rng,\n model=model,\n step=0,\n opt_state=opt_state,\n tx=tx,\n )\n\n def apply_gradients(self, *, grads, rng):\n updates, new_opt_state = self.tx.update(\n grads, self.opt_state, self.model.params\n )\n new_params = optax.apply_updates(self.model.params, updates)\n\n return self.replace(\n step=self.step + 1,\n model=self.model.replace(params=new_params),\n opt_state=new_opt_state,\n rng=rng,\n )" } ]
import datetime import imp import os import flax import jax import optax import tensorflow as tf import tqdm import wandb from functools import partial from absl import app, flags, logging from flax.traverse_util import flatten_dict from jax.sharding import Mesh, NamedSharding, PartitionSpec from ml_collections import config_flags, ConfigDict from octo.data.dataset import make_single_dataset from octo.model.octo_model import OctoModel from octo.utils.jax_utils import initialize_compilation_cache from octo.utils.spec import ModuleSpec from octo.utils.train_callbacks import ( RolloutVisualizationCallback, SaveCallback, ValidationCallback, VisualizationCallback, ) from octo.utils.train_utils import ( check_config_diff, create_optimizer, format_name_with_config, merge_params, process_text, Timer, TrainState, ) from jax_smi import initialise_tracking # type: ignore
11,474
params = model.params if FLAGS.config.optimizer.frozen_keys is None: FLAGS.config.optimizer.frozen_keys = model.config["optimizer"]["frozen_keys"] tx, lr_callable, param_norm_callable = create_optimizer( params, **FLAGS.config.optimizer.to_dict(), ) train_state = TrainState.create( model=model, tx=tx, rng=rng, ) ######### # # Save all metadata # ######### if FLAGS.config.save_dir is not None: save_dir = tf.io.gfile.join( FLAGS.config.save_dir, FLAGS.config.wandb.project, FLAGS.config.wandb.group or "", wandb_id, ) wandb.config.update(dict(save_dir=save_dir), allow_val_change=True) logging.info("Saving to %s", save_dir) save_callback = SaveCallback(save_dir) # Add window_size to top of config, to make eval easier new_config = ConfigDict(model.config) new_config["window_size"] = example_batch["observation"]["pad_mask"].shape[1] model = model.replace(config=new_config) # Save finetuning config since it's not saved by SaveCallback, i.e. as part of model.save_pretrained() with open( tf.io.gfile.join(save_dir, "finetune_config.json"), "w" ) as config_file: config_file.write(FLAGS.config.to_json_best_effort()) else: save_dir = None save_callback = SaveCallback(None) logging.warning("save_dir not passed in, not saving checkpoints") example_batch_spec = jax.tree_map( lambda arr: (arr.shape, str(arr.dtype)), example_batch ) wandb.config.update( dict(example_batch_spec=example_batch_spec), allow_val_change=True ) ######### # # Define loss, train_step, and eval_step # ######### def loss_fn(params, batch, rng, train=True): bound_module = model.module.bind({"params": params}, rngs={"dropout": rng}) transformer_embeddings = bound_module.octo_transformer( batch["observation"], batch["task"], batch["observation"]["pad_mask"], train=train, ) action_loss, action_metrics = bound_module.heads["action"].loss( transformer_embeddings, # Action head knows to pull out the action readout_key batch["action"], pad_mask=batch["observation"]["pad_mask"], train=train, ) return action_loss, action_metrics # Data parallelism # Model is replicated across devices, data is split across devices @partial( jax.jit, in_shardings=[replicated_sharding, dp_sharding], ) def train_step(state, batch): rng, dropout_rng = jax.random.split(state.rng) (loss, info), grads = jax.value_and_grad(loss_fn, has_aux=True)( state.model.params, batch, dropout_rng, train=True ) # Gradient Metrics (TODO: Does the finetuner need these?) ### grad_norm = optax.global_norm(grads) updates, _ = state.tx.update(grads, state.opt_state, state.model.params) update_norm = optax.global_norm(updates) info.update( { "grad_norm": grad_norm, "update_norm": update_norm, "param_norm": param_norm_callable(state.model.params), "learning_rate": lr_callable(state.step), } ) # End Debug Metrics # new_state = state.apply_gradients(grads=grads, rng=rng) return new_state, info ######### # # Build validation & visualization callbacks # ######### if FLAGS.config.modality == "image_conditioned": modes_to_evaluate = ["image_conditioned"] elif FLAGS.config.modality == "text_conditioned": modes_to_evaluate = ["text_conditioned"] elif FLAGS.config.modality == "multimodal": modes_to_evaluate = ["image_conditioned", "text_conditioned"] else: modes_to_evaluate = ["base"] dataset_kwargs_list = [FLAGS.config.dataset_kwargs]
try: initialise_tracking() except ImportError: pass FLAGS = flags.FLAGS flags.DEFINE_string("name", "experiment", "Experiment name.") flags.DEFINE_bool("debug", False, "Debug config (no wandb logging)") default_config_file = os.path.join( os.path.dirname(__file__), "configs/finetune_config.py" ) config_flags.DEFINE_config_file( "config", default_config_file, "File path to the training hyperparameter configuration.", lock_config=False, ) def main(_): initialize_compilation_cache() devices = jax.devices() logging.info( f""" Octo Finetuning Script ====================== Pretrained model: {FLAGS.config.pretrained_path} Finetuning Dataset: {FLAGS.config.dataset_kwargs.name} Data dir: {FLAGS.config.dataset_kwargs.data_dir} Task Modality: {FLAGS.config.modality} Finetuning Mode: {FLAGS.config.finetuning_mode} # Devices: {jax.device_count()} Batch size: {FLAGS.config.batch_size} ({FLAGS.config.batch_size // len(devices) } per device) # Steps: {FLAGS.config.num_steps} """ ) ######### # # Setup Jax Data Parallelism # ######### assert ( FLAGS.config.batch_size % len(devices) == 0 ), f"Batch size ({FLAGS.config.batch_size}) must be divisible by the number of devices ({len(devices)})" assert ( FLAGS.config.viz_kwargs.eval_batch_size % len(devices) == 0 ), f"Eval batch size ({FLAGS.config.viz_kwargs.eval_batch_size}) must be divisible by the number of devices ({len(devices)})" # create a 1D mesh with a single axis named "batch" mesh = Mesh(jax.devices(), axis_names="batch") # Our batches will be data-parallel sharded -- each device will get a slice of the batch dp_sharding = NamedSharding(mesh, PartitionSpec("batch")) # Our model will be replicated across devices (we are only doing data parallelism, not model parallelism) replicated_sharding = NamedSharding(mesh, PartitionSpec()) # prevent tensorflow from using GPU memory since it's only used for data loading tf.config.set_visible_devices([], "GPU") ######### # # Setup WandB # ######### name = format_name_with_config( FLAGS.name, FLAGS.config.to_dict(), ) wandb_id = "{name}_{time}".format( name=name, time=datetime.datetime.now().strftime("%Y%m%d_%H%M%S"), ) wandb.init( config=FLAGS.config.to_dict(), id=wandb_id, name=name, mode="disabled" if FLAGS.debug else None, **FLAGS.config.wandb, ) ######### # # Load Pretrained model + optionally modify config # ######### pretrained_model = OctoModel.load_pretrained( FLAGS.config.pretrained_path, step=FLAGS.config.pretrained_step, ) flat_config = flax.traverse_util.flatten_dict( pretrained_model.config, keep_empty_nodes=True ) for d_key in flax.traverse_util.flatten_dict( FLAGS.config.get("config_delete_keys", ConfigDict()).to_dict() ): for c_key in list(flat_config.keys()): if ".".join(c_key).startswith(".".join(d_key)): del flat_config[c_key] config = ConfigDict(flax.traverse_util.unflatten_dict(flat_config)) config.update(FLAGS.config.get("update_config", ConfigDict())) config = config.to_dict() check_config_diff(config, pretrained_model.config) ######### # # Setup Data Loader # ######### # create text processor if config["text_processor"] is None: text_processor = None else: text_processor = ModuleSpec.instantiate(config["text_processor"])() def process_batch(batch): batch = process_text(batch, text_processor) del batch["dataset_name"] return batch # load standardize_fn from `path/to/file.py:fn_name` format if ( standardize_fn := FLAGS.config["dataset_kwargs"].get("standardize_fn", None) ) is not None: path, name = standardize_fn.split(":") # imp is deprecated, but it's also what ml_collections uses 
standardize_fn = getattr(imp.load_source("standardize_fn", path), name) del FLAGS.config["dataset_kwargs"]["standardize_fn"] FLAGS.config["dataset_kwargs"]["standardize_fn"] = standardize_fn dataset = make_single_dataset( FLAGS.config.dataset_kwargs, traj_transform_kwargs=FLAGS.config.traj_transform_kwargs, frame_transform_kwargs=FLAGS.config.frame_transform_kwargs, train=True, ) train_data_iter = ( dataset.repeat() .unbatch() .shuffle(FLAGS.config.shuffle_buffer_size) .batch(FLAGS.config.batch_size) .iterator() ) train_data_iter = map(process_batch, train_data_iter) example_batch = next(train_data_iter) ######### # # Load Pretrained Model # ######### rng = jax.random.PRNGKey(FLAGS.config.seed) rng, init_rng = jax.random.split(rng) model = OctoModel.from_config( config, example_batch, text_processor, rng=init_rng, dataset_statistics=dataset.dataset_statistics, ) merged_params = merge_params(model.params, pretrained_model.params) model = model.replace(params=merged_params) del pretrained_model ######### # # Setup Optimizer and Train State # ######### params = model.params if FLAGS.config.optimizer.frozen_keys is None: FLAGS.config.optimizer.frozen_keys = model.config["optimizer"]["frozen_keys"] tx, lr_callable, param_norm_callable = create_optimizer( params, **FLAGS.config.optimizer.to_dict(), ) train_state = TrainState.create( model=model, tx=tx, rng=rng, ) ######### # # Save all metadata # ######### if FLAGS.config.save_dir is not None: save_dir = tf.io.gfile.join( FLAGS.config.save_dir, FLAGS.config.wandb.project, FLAGS.config.wandb.group or "", wandb_id, ) wandb.config.update(dict(save_dir=save_dir), allow_val_change=True) logging.info("Saving to %s", save_dir) save_callback = SaveCallback(save_dir) # Add window_size to top of config, to make eval easier new_config = ConfigDict(model.config) new_config["window_size"] = example_batch["observation"]["pad_mask"].shape[1] model = model.replace(config=new_config) # Save finetuning config since it's not saved by SaveCallback, i.e. as part of model.save_pretrained() with open( tf.io.gfile.join(save_dir, "finetune_config.json"), "w" ) as config_file: config_file.write(FLAGS.config.to_json_best_effort()) else: save_dir = None save_callback = SaveCallback(None) logging.warning("save_dir not passed in, not saving checkpoints") example_batch_spec = jax.tree_map( lambda arr: (arr.shape, str(arr.dtype)), example_batch ) wandb.config.update( dict(example_batch_spec=example_batch_spec), allow_val_change=True ) ######### # # Define loss, train_step, and eval_step # ######### def loss_fn(params, batch, rng, train=True): bound_module = model.module.bind({"params": params}, rngs={"dropout": rng}) transformer_embeddings = bound_module.octo_transformer( batch["observation"], batch["task"], batch["observation"]["pad_mask"], train=train, ) action_loss, action_metrics = bound_module.heads["action"].loss( transformer_embeddings, # Action head knows to pull out the action readout_key batch["action"], pad_mask=batch["observation"]["pad_mask"], train=train, ) return action_loss, action_metrics # Data parallelism # Model is replicated across devices, data is split across devices @partial( jax.jit, in_shardings=[replicated_sharding, dp_sharding], ) def train_step(state, batch): rng, dropout_rng = jax.random.split(state.rng) (loss, info), grads = jax.value_and_grad(loss_fn, has_aux=True)( state.model.params, batch, dropout_rng, train=True ) # Gradient Metrics (TODO: Does the finetuner need these?) 
### grad_norm = optax.global_norm(grads) updates, _ = state.tx.update(grads, state.opt_state, state.model.params) update_norm = optax.global_norm(updates) info.update( { "grad_norm": grad_norm, "update_norm": update_norm, "param_norm": param_norm_callable(state.model.params), "learning_rate": lr_callable(state.step), } ) # End Debug Metrics # new_state = state.apply_gradients(grads=grads, rng=rng) return new_state, info ######### # # Build validation & visualization callbacks # ######### if FLAGS.config.modality == "image_conditioned": modes_to_evaluate = ["image_conditioned"] elif FLAGS.config.modality == "text_conditioned": modes_to_evaluate = ["text_conditioned"] elif FLAGS.config.modality == "multimodal": modes_to_evaluate = ["image_conditioned", "text_conditioned"] else: modes_to_evaluate = ["base"] dataset_kwargs_list = [FLAGS.config.dataset_kwargs]
val_callback = ValidationCallback(
6
2023-12-13 09:58:56+00:00
16k
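(Aside on the finetuning script in the row above: it sets up a single-axis JAX mesh for pure data parallelism, with batches sharded across devices and model parameters replicated. A minimal standalone sketch of that sharding pattern, using illustrative shapes and a toy forward function and assuming nothing beyond stock JAX:)

import jax
import jax.numpy as jnp
from jax.sharding import Mesh, NamedSharding, PartitionSpec

# One mesh axis named "batch": data is split along it, parameters are replicated.
mesh = Mesh(jax.devices(), axis_names=("batch",))
dp_sharding = NamedSharding(mesh, PartitionSpec("batch"))
replicated_sharding = NamedSharding(mesh, PartitionSpec())

# Leading dim is a multiple of the device count so the batch shards evenly.
batch = jax.device_put(jnp.ones((jax.device_count() * 4, 32)), dp_sharding)
params = jax.device_put(jnp.ones((32, 16)), replicated_sharding)  # same copy on every device

@jax.jit
def forward(params, batch):
    return batch @ params  # jit propagates the shardings of the inputs

out = forward(params, batch)  # shape (device_count * 4, 16), sharded along the batch axis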
modelscope/richdreamer
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return 
torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
import itertools
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from contextlib import contextmanager, nullcontext
from einops import rearrange, repeat
from functools import partial
from omegaconf import ListConfig
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm
from extern.ldm_zero123.models.autoencoder import (AutoencoderKL, IdentityFirstStage, VQModelInterface,)
from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler
from extern.ldm_zero123.modules.attention import CrossAttention
from extern.ldm_zero123.modules.diffusionmodules.util import (
    extract_into_tensor, make_beta_schedule, noise_like,)
from extern.ldm_zero123.modules.distributions.distributions import (
    DiagonalGaussianDistribution, normal_kl,)
from extern.ldm_zero123.modules.ema import LitEma
from extern.ldm_zero123.util import (count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat,)
12,424
self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") 
denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior):
if isinstance(encoder_posterior, DiagonalGaussianDistribution):
8
2023-12-06 07:53:11+00:00
16k
rehg-lab/RAVE
annotator/oneformer/detectron2/modeling/meta_arch/fcos.py
[ { "identifier": "batched_nms", "path": "annotator/oneformer/detectron2/layers/nms.py", "snippet": "def batched_nms(\r\n boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float\r\n):\r\n \"\"\"\r\n Same as torchvision.ops.boxes.batched_nms, but with float().\r\n \"\"\"\r\n assert boxes.shape[-1] == 4\r\n # Note: Torchvision already has a strategy (https://github.com/pytorch/vision/issues/1311)\r\n # to decide whether to use coordinate trick or for loop to implement batched_nms. So we\r\n # just call it directly.\r\n # Fp16 does not have enough range for batched NMS, so adding float().\r\n return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold)\r" }, { "identifier": "ShapeSpec", "path": "annotator/oneformer/detectron2/layers/shape_spec.py", "snippet": "class ShapeSpec:\r\n \"\"\"\r\n A simple structure that contains basic shape specification about a tensor.\r\n It is often used as the auxiliary inputs/outputs of models,\r\n to complement the lack of shape inference ability among pytorch modules.\r\n \"\"\"\r\n\r\n channels: Optional[int] = None\r\n height: Optional[int] = None\r\n width: Optional[int] = None\r\n stride: Optional[int] = None\r" }, { "identifier": "Boxes", "path": "annotator/oneformer/detectron2/structures/boxes.py", "snippet": "class Boxes:\r\n \"\"\"\r\n This structure stores a list of boxes as a Nx4 torch.Tensor.\r\n It supports some common methods about boxes\r\n (`area`, `clip`, `nonempty`, etc),\r\n and also behaves like a Tensor\r\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\r\n\r\n Attributes:\r\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\r\n \"\"\"\r\n\r\n def __init__(self, tensor: torch.Tensor):\r\n \"\"\"\r\n Args:\r\n tensor (Tensor[float]): a Nx4 matrix. 
Each row is (x1, y1, x2, y2).\r\n \"\"\"\r\n if not isinstance(tensor, torch.Tensor):\r\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=torch.device(\"cpu\"))\r\n else:\r\n tensor = tensor.to(torch.float32)\r\n if tensor.numel() == 0:\r\n # Use reshape, so we don't end up creating a new tensor that does not depend on\r\n # the inputs (and consequently confuses jit)\r\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32)\r\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\r\n\r\n self.tensor = tensor\r\n\r\n def clone(self) -> \"Boxes\":\r\n \"\"\"\r\n Clone the Boxes.\r\n\r\n Returns:\r\n Boxes\r\n \"\"\"\r\n return Boxes(self.tensor.clone())\r\n\r\n def to(self, device: torch.device):\r\n # Boxes are assumed float32 and does not support to(dtype)\r\n return Boxes(self.tensor.to(device=device))\r\n\r\n def area(self) -> torch.Tensor:\r\n \"\"\"\r\n Computes the area of all the boxes.\r\n\r\n Returns:\r\n torch.Tensor: a vector with areas of each box.\r\n \"\"\"\r\n box = self.tensor\r\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\r\n return area\r\n\r\n def clip(self, box_size: Tuple[int, int]) -> None:\r\n \"\"\"\r\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\r\n and y coordinates to the range [0, height].\r\n\r\n Args:\r\n box_size (height, width): The clipping box's size.\r\n \"\"\"\r\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\r\n h, w = box_size\r\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\r\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\r\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\r\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\r\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\r\n\r\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\r\n \"\"\"\r\n Find boxes that are non-empty.\r\n A box is considered empty, if either of its side is no larger than threshold.\r\n\r\n Returns:\r\n Tensor:\r\n a binary vector which represents whether each box is empty\r\n (False) or non-empty (True).\r\n \"\"\"\r\n box = self.tensor\r\n widths = box[:, 2] - box[:, 0]\r\n heights = box[:, 3] - box[:, 1]\r\n keep = (widths > threshold) & (heights > threshold)\r\n return keep\r\n\r\n def __getitem__(self, item) -> \"Boxes\":\r\n \"\"\"\r\n Args:\r\n item: int, slice, or a BoolTensor\r\n\r\n Returns:\r\n Boxes: Create a new :class:`Boxes` by indexing.\r\n\r\n The following usage are allowed:\r\n\r\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\r\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\r\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\r\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\r\n\r\n Note that the returned Boxes might share storage with this Boxes,\r\n subject to Pytorch's indexing semantics.\r\n \"\"\"\r\n if isinstance(item, int):\r\n return Boxes(self.tensor[item].view(1, -1))\r\n b = self.tensor[item]\r\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\r\n return Boxes(b)\r\n\r\n def __len__(self) -> int:\r\n return self.tensor.shape[0]\r\n\r\n def __repr__(self) -> str:\r\n return \"Boxes(\" + str(self.tensor) + \")\"\r\n\r\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\r\n \"\"\"\r\n Args:\r\n box_size (height, width): Size of the reference box.\r\n boundary_threshold (int): Boxes that extend beyond the reference box\r\n boundary by more than boundary_threshold are considered \"outside\".\r\n\r\n Returns:\r\n a binary vector, indicating whether each box is inside the reference box.\r\n \"\"\"\r\n height, width = box_size\r\n inds_inside = (\r\n (self.tensor[..., 0] >= -boundary_threshold)\r\n & (self.tensor[..., 1] >= -boundary_threshold)\r\n & (self.tensor[..., 2] < width + boundary_threshold)\r\n & (self.tensor[..., 3] < height + boundary_threshold)\r\n )\r\n return inds_inside\r\n\r\n def get_centers(self) -> torch.Tensor:\r\n \"\"\"\r\n Returns:\r\n The box centers in a Nx2 array of (x, y).\r\n \"\"\"\r\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\r\n\r\n def scale(self, scale_x: float, scale_y: float) -> None:\r\n \"\"\"\r\n Scale the box with horizontal and vertical scaling factors\r\n \"\"\"\r\n self.tensor[:, 0::2] *= scale_x\r\n self.tensor[:, 1::2] *= scale_y\r\n\r\n @classmethod\r\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\r\n \"\"\"\r\n Concatenates a list of Boxes into a single Boxes\r\n\r\n Arguments:\r\n boxes_list (list[Boxes])\r\n\r\n Returns:\r\n Boxes: the concatenated Boxes\r\n \"\"\"\r\n assert isinstance(boxes_list, (list, tuple))\r\n if len(boxes_list) == 0:\r\n return cls(torch.empty(0))\r\n assert all([isinstance(box, Boxes) for box in boxes_list])\r\n\r\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\r\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\r\n return cat_boxes\r\n\r\n @property\r\n def device(self) -> device:\r\n return self.tensor.device\r\n\r\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\r\n # https://github.com/pytorch/pytorch/issues/18627\r\n @torch.jit.unused\r\n def __iter__(self):\r\n \"\"\"\r\n Yield a box as a Tensor of shape (4,) at a time.\r\n \"\"\"\r\n yield from self.tensor\r" }, { "identifier": "pairwise_point_box_distance", "path": "annotator/oneformer/detectron2/structures/boxes.py", "snippet": "def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes):\r\n \"\"\"\r\n Pairwise distance between N points and M boxes. The distance between a\r\n point and a box is represented by the distance from the point to 4 edges\r\n of the box. Distances are all positive when the point is inside the box.\r\n\r\n Args:\r\n points: Nx2 coordinates. Each row is (x, y)\r\n boxes: M boxes\r\n\r\n Returns:\r\n Tensor: distances of size (N, M, 4). 
The 4 values are distances from\r\n the point to the left, top, right, bottom of the box.\r\n \"\"\"\r\n x, y = points.unsqueeze(dim=2).unbind(dim=1) # (N, 1)\r\n x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2) # (1, M)\r\n return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2)\r" }, { "identifier": "ImageList", "path": "annotator/oneformer/detectron2/structures/image_list.py", "snippet": "class ImageList(object):\r\n \"\"\"\r\n Structure that holds a list of images (of possibly\r\n varying sizes) as a single tensor.\r\n This works by padding the images to the same size.\r\n The original sizes of each image is stored in `image_sizes`.\r\n\r\n Attributes:\r\n image_sizes (list[tuple[int, int]]): each tuple is (h, w).\r\n During tracing, it becomes list[Tensor] instead.\r\n \"\"\"\r\n\r\n def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):\r\n \"\"\"\r\n Arguments:\r\n tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1\r\n image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can\r\n be smaller than (H, W) due to padding.\r\n \"\"\"\r\n self.tensor = tensor\r\n self.image_sizes = image_sizes\r\n\r\n def __len__(self) -> int:\r\n return len(self.image_sizes)\r\n\r\n def __getitem__(self, idx) -> torch.Tensor:\r\n \"\"\"\r\n Access the individual image in its original size.\r\n\r\n Args:\r\n idx: int or slice\r\n\r\n Returns:\r\n Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1\r\n \"\"\"\r\n size = self.image_sizes[idx]\r\n return self.tensor[idx, ..., : size[0], : size[1]]\r\n\r\n @torch.jit.unused\r\n def to(self, *args: Any, **kwargs: Any) -> \"ImageList\":\r\n cast_tensor = self.tensor.to(*args, **kwargs)\r\n return ImageList(cast_tensor, self.image_sizes)\r\n\r\n @property\r\n def device(self) -> device:\r\n return self.tensor.device\r\n\r\n @staticmethod\r\n def from_tensors(\r\n tensors: List[torch.Tensor],\r\n size_divisibility: int = 0,\r\n pad_value: float = 0.0,\r\n padding_constraints: Optional[Dict[str, int]] = None,\r\n ) -> \"ImageList\":\r\n \"\"\"\r\n Args:\r\n tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or\r\n (C_1, ..., C_K, Hi, Wi) where K >= 1. 
The Tensors will be padded\r\n to the same shape with `pad_value`.\r\n size_divisibility (int): If `size_divisibility > 0`, add padding to ensure\r\n the common height and width is divisible by `size_divisibility`.\r\n This depends on the model and many models need a divisibility of 32.\r\n pad_value (float): value to pad.\r\n padding_constraints (optional[Dict]): If given, it would follow the format as\r\n {\"size_divisibility\": int, \"square_size\": int}, where `size_divisibility` will\r\n overwrite the above one if presented and `square_size` indicates the\r\n square padding size if `square_size` > 0.\r\n Returns:\r\n an `ImageList`.\r\n \"\"\"\r\n assert len(tensors) > 0\r\n assert isinstance(tensors, (tuple, list))\r\n for t in tensors:\r\n assert isinstance(t, torch.Tensor), type(t)\r\n assert t.shape[:-2] == tensors[0].shape[:-2], t.shape\r\n\r\n image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]\r\n image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes]\r\n max_size = torch.stack(image_sizes_tensor).max(0).values\r\n\r\n if padding_constraints is not None:\r\n square_size = padding_constraints.get(\"square_size\", 0)\r\n if square_size > 0:\r\n # pad to square.\r\n max_size[0] = max_size[1] = square_size\r\n if \"size_divisibility\" in padding_constraints:\r\n size_divisibility = padding_constraints[\"size_divisibility\"]\r\n if size_divisibility > 1:\r\n stride = size_divisibility\r\n # the last two dims are H,W, both subject to divisibility requirement\r\n max_size = (max_size + (stride - 1)).div(stride, rounding_mode=\"floor\") * stride\r\n\r\n # handle weirdness of scripting and tracing ...\r\n if torch.jit.is_scripting():\r\n max_size: List[int] = max_size.to(dtype=torch.long).tolist()\r\n else:\r\n if torch.jit.is_tracing():\r\n image_sizes = image_sizes_tensor\r\n\r\n if len(tensors) == 1:\r\n # This seems slightly (2%) faster.\r\n # TODO: check whether it's faster for multiple images as well\r\n image_size = image_sizes[0]\r\n padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]\r\n batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)\r\n else:\r\n # max_size can be a tensor in tracing mode, therefore convert to list\r\n batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)\r\n device = (\r\n None if torch.jit.is_scripting() else (\"cpu\" if torch.jit.is_tracing() else None)\r\n )\r\n batched_imgs = tensors[0].new_full(batch_shape, pad_value, device=device)\r\n batched_imgs = move_device_like(batched_imgs, tensors[0])\r\n for i, img in enumerate(tensors):\r\n # Use `batched_imgs` directly instead of `img, pad_img = zip(tensors, batched_imgs)`\r\n # Tracing mode cannot capture `copy_()` of temporary locals\r\n batched_imgs[i, ..., : img.shape[-2], : img.shape[-1]].copy_(img)\r\n\r\n return ImageList(batched_imgs.contiguous(), image_sizes)\r" }, { "identifier": "Instances", "path": "annotator/oneformer/detectron2/structures/instances.py", "snippet": "class Instances:\r\n \"\"\"\r\n This class represents a list of instances in an image.\r\n It stores the attributes of instances (e.g., boxes, masks, labels, scores) as \"fields\".\r\n All fields must have the same ``__len__`` which is the number of instances.\r\n\r\n All other (non-field) attributes of this class are considered private:\r\n they must start with '_' and are not modifiable by a user.\r\n\r\n Some basic usage:\r\n\r\n 1. Set/get/check a field:\r\n\r\n .. 
code-block:: python\r\n\r\n instances.gt_boxes = Boxes(...)\r\n print(instances.pred_masks) # a tensor of shape (N, H, W)\r\n print('gt_masks' in instances)\r\n\r\n 2. ``len(instances)`` returns the number of instances\r\n 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields\r\n and returns a new :class:`Instances`.\r\n Typically, ``indices`` is a integer vector of indices,\r\n or a binary mask of length ``num_instances``\r\n\r\n .. code-block:: python\r\n\r\n category_3_detections = instances[instances.pred_classes == 3]\r\n confident_detections = instances[instances.scores > 0.9]\r\n \"\"\"\r\n\r\n def __init__(self, image_size: Tuple[int, int], **kwargs: Any):\r\n \"\"\"\r\n Args:\r\n image_size (height, width): the spatial size of the image.\r\n kwargs: fields to add to this `Instances`.\r\n \"\"\"\r\n self._image_size = image_size\r\n self._fields: Dict[str, Any] = {}\r\n for k, v in kwargs.items():\r\n self.set(k, v)\r\n\r\n @property\r\n def image_size(self) -> Tuple[int, int]:\r\n \"\"\"\r\n Returns:\r\n tuple: height, width\r\n \"\"\"\r\n return self._image_size\r\n\r\n def __setattr__(self, name: str, val: Any) -> None:\r\n if name.startswith(\"_\"):\r\n super().__setattr__(name, val)\r\n else:\r\n self.set(name, val)\r\n\r\n def __getattr__(self, name: str) -> Any:\r\n if name == \"_fields\" or name not in self._fields:\r\n raise AttributeError(\"Cannot find field '{}' in the given Instances!\".format(name))\r\n return self._fields[name]\r\n\r\n def set(self, name: str, value: Any) -> None:\r\n \"\"\"\r\n Set the field named `name` to `value`.\r\n The length of `value` must be the number of instances,\r\n and must agree with other existing fields in this object.\r\n \"\"\"\r\n with warnings.catch_warnings(record=True):\r\n data_len = len(value)\r\n if len(self._fields):\r\n assert (\r\n len(self) == data_len\r\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\r\n self._fields[name] = value\r\n\r\n def has(self, name: str) -> bool:\r\n \"\"\"\r\n Returns:\r\n bool: whether the field called `name` exists.\r\n \"\"\"\r\n return name in self._fields\r\n\r\n def remove(self, name: str) -> None:\r\n \"\"\"\r\n Remove the field called `name`.\r\n \"\"\"\r\n del self._fields[name]\r\n\r\n def get(self, name: str) -> Any:\r\n \"\"\"\r\n Returns the field called `name`.\r\n \"\"\"\r\n return self._fields[name]\r\n\r\n def get_fields(self) -> Dict[str, Any]:\r\n \"\"\"\r\n Returns:\r\n dict: a dict which maps names (str) to data of the fields\r\n\r\n Modifying the returned dict will modify this instance.\r\n \"\"\"\r\n return self._fields\r\n\r\n # Tensor-like methods\r\n def to(self, *args: Any, **kwargs: Any) -> \"Instances\":\r\n \"\"\"\r\n Returns:\r\n Instances: all fields are called with a `to(device)`, if the field has this method.\r\n \"\"\"\r\n ret = Instances(self._image_size)\r\n for k, v in self._fields.items():\r\n if hasattr(v, \"to\"):\r\n v = v.to(*args, **kwargs)\r\n ret.set(k, v)\r\n return ret\r\n\r\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Instances\":\r\n \"\"\"\r\n Args:\r\n item: an index-like object and will be used to index all the fields.\r\n\r\n Returns:\r\n If `item` is a string, return the data in the corresponding field.\r\n Otherwise, returns an `Instances` where all fields are indexed by `item`.\r\n \"\"\"\r\n if type(item) == int:\r\n if item >= len(self) or item < -len(self):\r\n raise IndexError(\"Instances index out of range!\")\r\n else:\r\n item = 
slice(item, None, len(self))\r\n\r\n ret = Instances(self._image_size)\r\n for k, v in self._fields.items():\r\n ret.set(k, v[item])\r\n return ret\r\n\r\n def __len__(self) -> int:\r\n for v in self._fields.values():\r\n # use __len__ because len() has to be int and is not friendly to tracing\r\n return v.__len__()\r\n raise NotImplementedError(\"Empty Instances does not support __len__!\")\r\n\r\n def __iter__(self):\r\n raise NotImplementedError(\"`Instances` object is not iterable!\")\r\n\r\n @staticmethod\r\n def cat(instance_lists: List[\"Instances\"]) -> \"Instances\":\r\n \"\"\"\r\n Args:\r\n instance_lists (list[Instances])\r\n\r\n Returns:\r\n Instances\r\n \"\"\"\r\n assert all(isinstance(i, Instances) for i in instance_lists)\r\n assert len(instance_lists) > 0\r\n if len(instance_lists) == 1:\r\n return instance_lists[0]\r\n\r\n image_size = instance_lists[0].image_size\r\n if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing\r\n for i in instance_lists[1:]:\r\n assert i.image_size == image_size\r\n ret = Instances(image_size)\r\n for k in instance_lists[0]._fields.keys():\r\n values = [i.get(k) for i in instance_lists]\r\n v0 = values[0]\r\n if isinstance(v0, torch.Tensor):\r\n values = torch.cat(values, dim=0)\r\n elif isinstance(v0, list):\r\n values = list(itertools.chain(*values))\r\n elif hasattr(type(v0), \"cat\"):\r\n values = type(v0).cat(values)\r\n else:\r\n raise ValueError(\"Unsupported type {} for concatenation\".format(type(v0)))\r\n ret.set(k, values)\r\n return ret\r\n\r\n def __str__(self) -> str:\r\n s = self.__class__.__name__ + \"(\"\r\n s += \"num_instances={}, \".format(len(self))\r\n s += \"image_height={}, \".format(self._image_size[0])\r\n s += \"image_width={}, \".format(self._image_size[1])\r\n s += \"fields=[{}])\".format(\", \".join((f\"{k}: {v}\" for k, v in self._fields.items())))\r\n return s\r\n\r\n __repr__ = __str__\r" }, { "identifier": "get_event_storage", "path": "annotator/oneformer/detectron2/utils/events.py", "snippet": "def get_event_storage():\r\n \"\"\"\r\n Returns:\r\n The :class:`EventStorage` object that's currently being used.\r\n Throws an error if no :class:`EventStorage` is currently enabled.\r\n \"\"\"\r\n assert len(\r\n _CURRENT_STORAGE_STACK\r\n ), \"get_event_storage() has to be called inside a 'with EventStorage(...)' context!\"\r\n return _CURRENT_STORAGE_STACK[-1]\r" }, { "identifier": "DefaultAnchorGenerator", "path": "annotator/oneformer/detectron2/modeling/anchor_generator.py", "snippet": "class DefaultAnchorGenerator(nn.Module):\r\n \"\"\"\r\n Compute anchors in the standard ways described in\r\n \"Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks\".\r\n \"\"\"\r\n\r\n box_dim: torch.jit.Final[int] = 4\r\n \"\"\"\r\n the dimension of each anchor box.\r\n \"\"\"\r\n\r\n @configurable\r\n def __init__(self, *, sizes, aspect_ratios, strides, offset=0.5):\r\n \"\"\"\r\n This interface is experimental.\r\n\r\n Args:\r\n sizes (list[list[float]] or list[float]):\r\n If ``sizes`` is list[list[float]], ``sizes[i]`` is the list of anchor sizes\r\n (i.e. sqrt of anchor area) to use for the i-th feature map.\r\n If ``sizes`` is list[float], ``sizes`` is used for all feature maps.\r\n Anchor sizes are given in absolute lengths in units of\r\n the input image; they do not dynamically scale if the input image size changes.\r\n aspect_ratios (list[list[float]] or list[float]): list of aspect ratios\r\n (i.e. height / width) to use for anchors. 
Same \"broadcast\" rule for `sizes` applies.\r\n strides (list[int]): stride of each input feature.\r\n offset (float): Relative offset between the center of the first anchor and the top-left\r\n corner of the image. Value has to be in [0, 1).\r\n Recommend to use 0.5, which means half stride.\r\n \"\"\"\r\n super().__init__()\r\n\r\n self.strides = strides\r\n self.num_features = len(self.strides)\r\n sizes = _broadcast_params(sizes, self.num_features, \"sizes\")\r\n aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, \"aspect_ratios\")\r\n self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios)\r\n\r\n self.offset = offset\r\n assert 0.0 <= self.offset < 1.0, self.offset\r\n\r\n @classmethod\r\n def from_config(cls, cfg, input_shape: List[ShapeSpec]):\r\n return {\r\n \"sizes\": cfg.MODEL.ANCHOR_GENERATOR.SIZES,\r\n \"aspect_ratios\": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS,\r\n \"strides\": [x.stride for x in input_shape],\r\n \"offset\": cfg.MODEL.ANCHOR_GENERATOR.OFFSET,\r\n }\r\n\r\n def _calculate_anchors(self, sizes, aspect_ratios):\r\n cell_anchors = [\r\n self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios)\r\n ]\r\n return BufferList(cell_anchors)\r\n\r\n @property\r\n @torch.jit.unused\r\n def num_cell_anchors(self):\r\n \"\"\"\r\n Alias of `num_anchors`.\r\n \"\"\"\r\n return self.num_anchors\r\n\r\n @property\r\n @torch.jit.unused\r\n def num_anchors(self):\r\n \"\"\"\r\n Returns:\r\n list[int]: Each int is the number of anchors at every pixel\r\n location, on that feature map.\r\n For example, if at every pixel we use anchors of 3 aspect\r\n ratios and 5 sizes, the number of anchors is 15.\r\n (See also ANCHOR_GENERATOR.SIZES and ANCHOR_GENERATOR.ASPECT_RATIOS in config)\r\n\r\n In standard RPN models, `num_anchors` on every feature map is the same.\r\n \"\"\"\r\n return [len(cell_anchors) for cell_anchors in self.cell_anchors]\r\n\r\n def _grid_anchors(self, grid_sizes: List[List[int]]):\r\n \"\"\"\r\n Returns:\r\n list[Tensor]: #featuremap tensors, each is (#locations x #cell_anchors) x 4\r\n \"\"\"\r\n anchors = []\r\n # buffers() not supported by torchscript. use named_buffers() instead\r\n buffers: List[torch.Tensor] = [x[1] for x in self.cell_anchors.named_buffers()]\r\n for size, stride, base_anchors in zip(grid_sizes, self.strides, buffers):\r\n shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors)\r\n shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)\r\n\r\n anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4))\r\n\r\n return anchors\r\n\r\n def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)):\r\n \"\"\"\r\n Generate a tensor storing canonical anchor boxes, which are all anchor\r\n boxes of different sizes and aspect_ratios centered at (0, 0).\r\n We can later build the set of anchors for a full feature map by\r\n shifting and tiling these tensors (see `meth:_grid_anchors`).\r\n\r\n Args:\r\n sizes (tuple[float]):\r\n aspect_ratios (tuple[float]]):\r\n\r\n Returns:\r\n Tensor of shape (len(sizes) * len(aspect_ratios), 4) storing anchor boxes\r\n in XYXY format.\r\n \"\"\"\r\n\r\n # This is different from the anchor generator defined in the original Faster R-CNN\r\n # code or Detectron. 
They yield the same AP, however the old version defines cell\r\n # anchors in a less natural way with a shift relative to the feature grid and\r\n # quantization that results in slightly different sizes for different aspect ratios.\r\n # See also https://github.com/facebookresearch/Detectron/issues/227\r\n\r\n anchors = []\r\n for size in sizes:\r\n area = size**2.0\r\n for aspect_ratio in aspect_ratios:\r\n # s * s = w * h\r\n # a = h / w\r\n # ... some algebra ...\r\n # w = sqrt(s * s / a)\r\n # h = a * w\r\n w = math.sqrt(area / aspect_ratio)\r\n h = aspect_ratio * w\r\n x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0\r\n anchors.append([x0, y0, x1, y1])\r\n return torch.tensor(anchors)\r\n\r\n def forward(self, features: List[torch.Tensor]):\r\n \"\"\"\r\n Args:\r\n features (list[Tensor]): list of backbone feature maps on which to generate anchors.\r\n\r\n Returns:\r\n list[Boxes]: a list of Boxes containing all the anchors for each feature map\r\n (i.e. the cell anchors repeated over all locations in the feature map).\r\n The number of anchors of each feature map is Hi x Wi x num_cell_anchors,\r\n where Hi, Wi are resolution of the feature map divided by anchor stride.\r\n \"\"\"\r\n grid_sizes = [feature_map.shape[-2:] for feature_map in features]\r\n anchors_over_all_feature_maps = self._grid_anchors(grid_sizes)\r\n return [Boxes(x) for x in anchors_over_all_feature_maps]\r" }, { "identifier": "Backbone", "path": "annotator/oneformer/detectron2/modeling/backbone/backbone.py", "snippet": "class Backbone(nn.Module, metaclass=ABCMeta):\r\n \"\"\"\r\n Abstract base class for network backbones.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n The `__init__` method of any subclass can specify its own set of arguments.\r\n \"\"\"\r\n super().__init__()\r\n\r\n @abstractmethod\r\n def forward(self):\r\n \"\"\"\r\n Subclasses must override this method, but adhere to the same return type.\r\n\r\n Returns:\r\n dict[str->Tensor]: mapping from feature name (e.g., \"res2\") to tensor\r\n \"\"\"\r\n pass\r\n\r\n @property\r\n def size_divisibility(self) -> int:\r\n \"\"\"\r\n Some backbones require the input height and width to be divisible by a\r\n specific integer. This is typically true for encoder / decoder type networks\r\n with lateral connection (e.g., FPN) for which feature maps need to match\r\n dimension in the \"bottom up\" and \"top down\" paths. Set to 0 if no specific\r\n input size divisibility is required.\r\n \"\"\"\r\n return 0\r\n\r\n @property\r\n def padding_constraints(self) -> Dict[str, int]:\r\n \"\"\"\r\n This property is a generalization of size_divisibility. Some backbones and training\r\n recipes require specific padding constraints, such as enforcing divisibility by a specific\r\n integer (e.g., FPN) or padding to a square (e.g., ViTDet with large-scale jitter\r\n in :paper:vitdet). `padding_constraints` contains these optional items like:\r\n {\r\n \"size_divisibility\": int,\r\n \"square_size\": int,\r\n # Future options are possible\r\n }\r\n `size_divisibility` will read from here if presented and `square_size` indicates the\r\n square padding size if `square_size` > 0.\r\n\r\n TODO: use type of Dict[str, int] to avoid torchscipt issues. 
The type of padding_constraints\r\n could be generalized as TypedDict (Python 3.8+) to support more types in the future.\r\n \"\"\"\r\n return {}\r\n\r\n def output_shape(self):\r\n \"\"\"\r\n Returns:\r\n dict[str->ShapeSpec]\r\n \"\"\"\r\n # this is a backward-compatible default\r\n return {\r\n name: ShapeSpec(\r\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\r\n )\r\n for name in self._out_features\r\n }\r" }, { "identifier": "Box2BoxTransformLinear", "path": "annotator/oneformer/detectron2/modeling/box_regression.py", "snippet": "class Box2BoxTransformLinear(object):\r\n \"\"\"\r\n The linear box-to-box transform defined in FCOS. The transformation is parameterized\r\n by the distance from the center of (square) src box to 4 edges of the target box.\r\n \"\"\"\r\n\r\n def __init__(self, normalize_by_size=True):\r\n \"\"\"\r\n Args:\r\n normalize_by_size: normalize deltas by the size of src (anchor) boxes.\r\n \"\"\"\r\n self.normalize_by_size = normalize_by_size\r\n\r\n def get_deltas(self, src_boxes, target_boxes):\r\n \"\"\"\r\n Get box regression transformation deltas (dx1, dy1, dx2, dy2) that can be used\r\n to transform the `src_boxes` into the `target_boxes`. That is, the relation\r\n ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true.\r\n The center of src must be inside target boxes.\r\n\r\n Args:\r\n src_boxes (Tensor): square source boxes, e.g., anchors\r\n target_boxes (Tensor): target of the transformation, e.g., ground-truth\r\n boxes.\r\n \"\"\"\r\n assert isinstance(src_boxes, torch.Tensor), type(src_boxes)\r\n assert isinstance(target_boxes, torch.Tensor), type(target_boxes)\r\n\r\n src_ctr_x = 0.5 * (src_boxes[:, 0] + src_boxes[:, 2])\r\n src_ctr_y = 0.5 * (src_boxes[:, 1] + src_boxes[:, 3])\r\n\r\n target_l = src_ctr_x - target_boxes[:, 0]\r\n target_t = src_ctr_y - target_boxes[:, 1]\r\n target_r = target_boxes[:, 2] - src_ctr_x\r\n target_b = target_boxes[:, 3] - src_ctr_y\r\n\r\n deltas = torch.stack((target_l, target_t, target_r, target_b), dim=1)\r\n if self.normalize_by_size:\r\n stride_w = src_boxes[:, 2] - src_boxes[:, 0]\r\n stride_h = src_boxes[:, 3] - src_boxes[:, 1]\r\n strides = torch.stack([stride_w, stride_h, stride_w, stride_h], axis=1)\r\n deltas = deltas / strides\r\n\r\n return deltas\r\n\r\n def apply_deltas(self, deltas, boxes):\r\n \"\"\"\r\n Apply transformation `deltas` (dx1, dy1, dx2, dy2) to `boxes`.\r\n\r\n Args:\r\n deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.\r\n deltas[i] represents k potentially different class-specific\r\n box transformations for the single box boxes[i].\r\n boxes (Tensor): boxes to transform, of shape (N, 4)\r\n \"\"\"\r\n # Ensure the output is a valid box. 
See Sec 2.1 of https://arxiv.org/abs/2006.09214\r\n deltas = F.relu(deltas)\r\n boxes = boxes.to(deltas.dtype)\r\n\r\n ctr_x = 0.5 * (boxes[:, 0] + boxes[:, 2])\r\n ctr_y = 0.5 * (boxes[:, 1] + boxes[:, 3])\r\n if self.normalize_by_size:\r\n stride_w = boxes[:, 2] - boxes[:, 0]\r\n stride_h = boxes[:, 3] - boxes[:, 1]\r\n strides = torch.stack([stride_w, stride_h, stride_w, stride_h], axis=1)\r\n deltas = deltas * strides\r\n\r\n l = deltas[:, 0::4]\r\n t = deltas[:, 1::4]\r\n r = deltas[:, 2::4]\r\n b = deltas[:, 3::4]\r\n\r\n pred_boxes = torch.zeros_like(deltas)\r\n pred_boxes[:, 0::4] = ctr_x[:, None] - l # x1\r\n pred_boxes[:, 1::4] = ctr_y[:, None] - t # y1\r\n pred_boxes[:, 2::4] = ctr_x[:, None] + r # x2\r\n pred_boxes[:, 3::4] = ctr_y[:, None] + b # y2\r\n return pred_boxes\r" }, { "identifier": "_dense_box_regression_loss", "path": "annotator/oneformer/detectron2/modeling/box_regression.py", "snippet": "def _dense_box_regression_loss(\r\n anchors: List[Union[Boxes, torch.Tensor]],\r\n box2box_transform: Box2BoxTransform,\r\n pred_anchor_deltas: List[torch.Tensor],\r\n gt_boxes: List[torch.Tensor],\r\n fg_mask: torch.Tensor,\r\n box_reg_loss_type=\"smooth_l1\",\r\n smooth_l1_beta=0.0,\r\n):\r\n \"\"\"\r\n Compute loss for dense multi-level box regression.\r\n Loss is accumulated over ``fg_mask``.\r\n\r\n Args:\r\n anchors: #lvl anchor boxes, each is (HixWixA, 4)\r\n pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4)\r\n gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A))\r\n fg_mask: the foreground boolean mask of shape (N, R) to compute loss on\r\n box_reg_loss_type (str): Loss type to use. Supported losses: \"smooth_l1\", \"giou\",\r\n \"diou\", \"ciou\".\r\n smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to\r\n use L1 loss. Only used when `box_reg_loss_type` is \"smooth_l1\"\r\n \"\"\"\r\n if isinstance(anchors[0], Boxes):\r\n anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)\r\n else:\r\n anchors = cat(anchors)\r\n if box_reg_loss_type == \"smooth_l1\":\r\n gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes]\r\n gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)\r\n loss_box_reg = smooth_l1_loss(\r\n cat(pred_anchor_deltas, dim=1)[fg_mask],\r\n gt_anchor_deltas[fg_mask],\r\n beta=smooth_l1_beta,\r\n reduction=\"sum\",\r\n )\r\n elif box_reg_loss_type == \"giou\":\r\n pred_boxes = [\r\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\r\n ]\r\n loss_box_reg = giou_loss(\r\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\r\n )\r\n elif box_reg_loss_type == \"diou\":\r\n pred_boxes = [\r\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\r\n ]\r\n loss_box_reg = diou_loss(\r\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\r\n )\r\n elif box_reg_loss_type == \"ciou\":\r\n pred_boxes = [\r\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\r\n ]\r\n loss_box_reg = ciou_loss(\r\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\r\n )\r\n else:\r\n raise ValueError(f\"Invalid dense box regression loss type '{box_reg_loss_type}'\")\r\n return loss_box_reg\r" }, { "identifier": "DenseDetector", "path": "annotator/oneformer/detectron2/modeling/meta_arch/dense_detector.py", "snippet": "class DenseDetector(nn.Module):\r\n \"\"\"\r\n Base class for dense detector. 
We define a dense detector as a fully-convolutional model that\r\n makes per-pixel (i.e. dense) predictions.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n backbone: Backbone,\r\n head: nn.Module,\r\n head_in_features: Optional[List[str]] = None,\r\n *,\r\n pixel_mean,\r\n pixel_std,\r\n ):\r\n \"\"\"\r\n Args:\r\n backbone: backbone module\r\n head: head module\r\n head_in_features: backbone features to use in head. Default to all backbone features.\r\n pixel_mean (Tuple[float]):\r\n Values to be used for image normalization (BGR order).\r\n To train on images of different number of channels, set different mean & std.\r\n Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]\r\n pixel_std (Tuple[float]):\r\n When using pre-trained models in Detectron1 or any MSRA models,\r\n std has been absorbed into its conv1 weights, so the std needs to be set 1.\r\n Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)\r\n \"\"\"\r\n super().__init__()\r\n\r\n self.backbone = backbone\r\n self.head = head\r\n if head_in_features is None:\r\n shapes = self.backbone.output_shape()\r\n self.head_in_features = sorted(shapes.keys(), key=lambda x: shapes[x].stride)\r\n else:\r\n self.head_in_features = head_in_features\r\n self.register_buffer(\"pixel_mean\", torch.tensor(pixel_mean).view(-1, 1, 1), False)\r\n self.register_buffer(\"pixel_std\", torch.tensor(pixel_std).view(-1, 1, 1), False)\r\n\r\n @property\r\n def device(self):\r\n return self.pixel_mean.device\r\n\r\n def _move_to_current_device(self, x):\r\n return move_device_like(x, self.pixel_mean)\r\n\r\n def forward(self, batched_inputs: List[Dict[str, Tensor]]):\r\n \"\"\"\r\n Args:\r\n batched_inputs: a list, batched outputs of :class:`DatasetMapper` .\r\n Each item in the list contains the inputs for one image.\r\n For now, each item in the list is a dict that contains:\r\n\r\n * image: Tensor, image in (C, H, W) format.\r\n * instances: Instances\r\n\r\n Other information that's included in the original dicts, such as:\r\n\r\n * \"height\", \"width\" (int): the output resolution of the model, used in inference.\r\n See :meth:`postprocess` for details.\r\n\r\n Returns:\r\n In training, dict[str, Tensor]: mapping from a named loss to a tensor storing the\r\n loss. Used during training only. 
In inference, the standard output format, described\r\n in :doc:`/tutorials/models`.\r\n \"\"\"\r\n images = self.preprocess_image(batched_inputs)\r\n features = self.backbone(images.tensor)\r\n features = [features[f] for f in self.head_in_features]\r\n predictions = self.head(features)\r\n\r\n if self.training:\r\n assert not torch.jit.is_scripting(), \"Not supported\"\r\n assert \"instances\" in batched_inputs[0], \"Instance annotations are missing in training!\"\r\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\r\n return self.forward_training(images, features, predictions, gt_instances)\r\n else:\r\n results = self.forward_inference(images, features, predictions)\r\n if torch.jit.is_scripting():\r\n return results\r\n\r\n processed_results = []\r\n for results_per_image, input_per_image, image_size in zip(\r\n results, batched_inputs, images.image_sizes\r\n ):\r\n height = input_per_image.get(\"height\", image_size[0])\r\n width = input_per_image.get(\"width\", image_size[1])\r\n r = detector_postprocess(results_per_image, height, width)\r\n processed_results.append({\"instances\": r})\r\n return processed_results\r\n\r\n def forward_training(self, images, features, predictions, gt_instances):\r\n raise NotImplementedError()\r\n\r\n def preprocess_image(self, batched_inputs: List[Dict[str, Tensor]]):\r\n \"\"\"\r\n Normalize, pad and batch the input images.\r\n \"\"\"\r\n images = [self._move_to_current_device(x[\"image\"]) for x in batched_inputs]\r\n images = [(x - self.pixel_mean) / self.pixel_std for x in images]\r\n images = ImageList.from_tensors(\r\n images,\r\n self.backbone.size_divisibility,\r\n padding_constraints=self.backbone.padding_constraints,\r\n )\r\n return images\r\n\r\n def _transpose_dense_predictions(\r\n self, predictions: List[List[Tensor]], dims_per_anchor: List[int]\r\n ) -> List[List[Tensor]]:\r\n \"\"\"\r\n Transpose the dense per-level predictions.\r\n\r\n Args:\r\n predictions: a list of outputs, each is a list of per-level\r\n predictions with shape (N, Ai x K, Hi, Wi), where N is the\r\n number of images, Ai is the number of anchors per location on\r\n level i, K is the dimension of predictions per anchor.\r\n dims_per_anchor: the value of K for each predictions. e.g. 4 for\r\n box prediction, #classes for classification prediction.\r\n\r\n Returns:\r\n List[List[Tensor]]: each prediction is transposed to (N, Hi x Wi x Ai, K).\r\n \"\"\"\r\n assert len(predictions) == len(dims_per_anchor)\r\n res: List[List[Tensor]] = []\r\n for pred, dim_per_anchor in zip(predictions, dims_per_anchor):\r\n pred = [permute_to_N_HWA_K(x, dim_per_anchor) for x in pred]\r\n res.append(pred)\r\n return res\r\n\r\n def _ema_update(self, name: str, value: float, initial_value: float, momentum: float = 0.9):\r\n \"\"\"\r\n Apply EMA update to `self.name` using `value`.\r\n\r\n This is mainly used for loss normalizer. In Detectron1, loss is normalized by number\r\n of foreground samples in the batch. When batch size is 1 per GPU, #foreground has a\r\n large variance and using it lead to lower performance. 
Therefore we maintain an EMA of\r\n #foreground to stabilize the normalizer.\r\n\r\n Args:\r\n name: name of the normalizer\r\n value: the new value to update\r\n initial_value: the initial value to start with\r\n momentum: momentum of EMA\r\n\r\n Returns:\r\n float: the updated EMA value\r\n \"\"\"\r\n if hasattr(self, name):\r\n old = getattr(self, name)\r\n else:\r\n old = initial_value\r\n new = old * momentum + value * (1 - momentum)\r\n setattr(self, name, new)\r\n return new\r\n\r\n def _decode_per_level_predictions(\r\n self,\r\n anchors: Boxes,\r\n pred_scores: Tensor,\r\n pred_deltas: Tensor,\r\n score_thresh: float,\r\n topk_candidates: int,\r\n image_size: Tuple[int, int],\r\n ) -> Instances:\r\n \"\"\"\r\n Decode boxes and classification predictions of one featuer level, by\r\n the following steps:\r\n 1. filter the predictions based on score threshold and top K scores.\r\n 2. transform the box regression outputs\r\n 3. return the predicted scores, classes and boxes\r\n\r\n Args:\r\n anchors: Boxes, anchor for this feature level\r\n pred_scores: HxWxA,K\r\n pred_deltas: HxWxA,4\r\n\r\n Returns:\r\n Instances: with field \"scores\", \"pred_boxes\", \"pred_classes\".\r\n \"\"\"\r\n # Apply two filtering to make NMS faster.\r\n # 1. Keep boxes with confidence score higher than threshold\r\n keep_idxs = pred_scores > score_thresh\r\n pred_scores = pred_scores[keep_idxs]\r\n topk_idxs = torch.nonzero(keep_idxs) # Kx2\r\n\r\n # 2. Keep top k top scoring boxes only\r\n topk_idxs_size = topk_idxs.shape[0]\r\n if isinstance(topk_idxs_size, Tensor):\r\n # It's a tensor in tracing\r\n num_topk = torch.clamp(topk_idxs_size, max=topk_candidates)\r\n else:\r\n num_topk = min(topk_idxs_size, topk_candidates)\r\n pred_scores, idxs = pred_scores.topk(num_topk)\r\n topk_idxs = topk_idxs[idxs]\r\n\r\n anchor_idxs, classes_idxs = topk_idxs.unbind(dim=1)\r\n\r\n pred_boxes = self.box2box_transform.apply_deltas(\r\n pred_deltas[anchor_idxs], anchors.tensor[anchor_idxs]\r\n )\r\n return Instances(\r\n image_size, pred_boxes=Boxes(pred_boxes), scores=pred_scores, pred_classes=classes_idxs\r\n )\r\n\r\n def _decode_multi_level_predictions(\r\n self,\r\n anchors: List[Boxes],\r\n pred_scores: List[Tensor],\r\n pred_deltas: List[Tensor],\r\n score_thresh: float,\r\n topk_candidates: int,\r\n image_size: Tuple[int, int],\r\n ) -> Instances:\r\n \"\"\"\r\n Run `_decode_per_level_predictions` for all feature levels and concat the results.\r\n \"\"\"\r\n predictions = [\r\n self._decode_per_level_predictions(\r\n anchors_i,\r\n box_cls_i,\r\n box_reg_i,\r\n self.test_score_thresh,\r\n self.test_topk_candidates,\r\n image_size,\r\n )\r\n # Iterate over every feature level\r\n for box_cls_i, box_reg_i, anchors_i in zip(pred_scores, pred_deltas, anchors)\r\n ]\r\n return predictions[0].cat(predictions) # 'Instances.cat' is not scriptale but this is\r\n\r\n def visualize_training(self, batched_inputs, results):\r\n \"\"\"\r\n A function used to visualize ground truth images and final network predictions.\r\n It shows ground truth bounding boxes on the original image and up to 20\r\n predicted object bounding boxes on the original image.\r\n\r\n Args:\r\n batched_inputs (list): a list that contains input to the model.\r\n results (List[Instances]): a list of #images elements returned by forward_inference().\r\n \"\"\"\r\n from annotator.oneformer.detectron2.utils.visualizer import Visualizer\r\n\r\n assert len(batched_inputs) == len(\r\n results\r\n ), \"Cannot visualize inputs and results of different 
sizes\"\r\n storage = get_event_storage()\r\n max_boxes = 20\r\n\r\n image_index = 0 # only visualize a single image\r\n img = batched_inputs[image_index][\"image\"]\r\n img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)\r\n v_gt = Visualizer(img, None)\r\n v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index][\"instances\"].gt_boxes)\r\n anno_img = v_gt.get_image()\r\n processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1])\r\n predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()\r\n\r\n v_pred = Visualizer(img, None)\r\n v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes])\r\n prop_img = v_pred.get_image()\r\n vis_img = np.vstack((anno_img, prop_img))\r\n vis_img = vis_img.transpose(2, 0, 1)\r\n vis_name = f\"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results\"\r\n storage.put_image(vis_name, vis_img)\r" }, { "identifier": "RetinaNetHead", "path": "annotator/oneformer/detectron2/modeling/meta_arch/retinanet.py", "snippet": "class RetinaNetHead(nn.Module):\r\n \"\"\"\r\n The head used in RetinaNet for object classification and box regression.\r\n It has two subnets for the two tasks, with a common structure but separate parameters.\r\n \"\"\"\r\n\r\n @configurable\r\n def __init__(\r\n self,\r\n *,\r\n input_shape: List[ShapeSpec],\r\n num_classes,\r\n num_anchors,\r\n conv_dims: List[int],\r\n norm=\"\",\r\n prior_prob=0.01,\r\n ):\r\n \"\"\"\r\n NOTE: this interface is experimental.\r\n\r\n Args:\r\n input_shape (List[ShapeSpec]): input shape\r\n num_classes (int): number of classes. Used to label background proposals.\r\n num_anchors (int): number of generated anchors\r\n conv_dims (List[int]): dimensions for each convolution layer\r\n norm (str or callable):\r\n Normalization for conv layers except for the two output layers.\r\n See :func:`detectron2.layers.get_norm` for supported types.\r\n prior_prob (float): Prior weight for computing bias\r\n \"\"\"\r\n super().__init__()\r\n\r\n self._num_features = len(input_shape)\r\n if norm == \"BN\" or norm == \"SyncBN\":\r\n logger.info(\r\n f\"Using domain-specific {norm} in RetinaNetHead with len={self._num_features}.\"\r\n )\r\n bn_class = nn.BatchNorm2d if norm == \"BN\" else nn.SyncBatchNorm\r\n\r\n def norm(c):\r\n return CycleBatchNormList(\r\n length=self._num_features, bn_class=bn_class, num_features=c\r\n )\r\n\r\n else:\r\n norm_name = str(type(get_norm(norm, 32)))\r\n if \"BN\" in norm_name:\r\n logger.warning(\r\n f\"Shared BatchNorm (type={norm_name}) may not work well in RetinaNetHead.\"\r\n )\r\n\r\n cls_subnet = []\r\n bbox_subnet = []\r\n for in_channels, out_channels in zip(\r\n [input_shape[0].channels] + list(conv_dims), conv_dims\r\n ):\r\n cls_subnet.append(\r\n nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\r\n )\r\n if norm:\r\n cls_subnet.append(get_norm(norm, out_channels))\r\n cls_subnet.append(nn.ReLU())\r\n bbox_subnet.append(\r\n nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\r\n )\r\n if norm:\r\n bbox_subnet.append(get_norm(norm, out_channels))\r\n bbox_subnet.append(nn.ReLU())\r\n\r\n self.cls_subnet = nn.Sequential(*cls_subnet)\r\n self.bbox_subnet = nn.Sequential(*bbox_subnet)\r\n self.cls_score = nn.Conv2d(\r\n conv_dims[-1], num_anchors * num_classes, kernel_size=3, stride=1, padding=1\r\n )\r\n self.bbox_pred = nn.Conv2d(\r\n conv_dims[-1], num_anchors * 4, kernel_size=3, stride=1, padding=1\r\n )\r\n\r\n # Initialization\r\n for 
modules in [self.cls_subnet, self.bbox_subnet, self.cls_score, self.bbox_pred]:\r\n for layer in modules.modules():\r\n if isinstance(layer, nn.Conv2d):\r\n torch.nn.init.normal_(layer.weight, mean=0, std=0.01)\r\n torch.nn.init.constant_(layer.bias, 0)\r\n\r\n # Use prior in model initialization to improve stability\r\n bias_value = -(math.log((1 - prior_prob) / prior_prob))\r\n torch.nn.init.constant_(self.cls_score.bias, bias_value)\r\n\r\n @classmethod\r\n def from_config(cls, cfg, input_shape: List[ShapeSpec]):\r\n num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors\r\n assert (\r\n len(set(num_anchors)) == 1\r\n ), \"Using different number of anchors between levels is not currently supported!\"\r\n num_anchors = num_anchors[0]\r\n\r\n return {\r\n \"input_shape\": input_shape,\r\n \"num_classes\": cfg.MODEL.RETINANET.NUM_CLASSES,\r\n \"conv_dims\": [input_shape[0].channels] * cfg.MODEL.RETINANET.NUM_CONVS,\r\n \"prior_prob\": cfg.MODEL.RETINANET.PRIOR_PROB,\r\n \"norm\": cfg.MODEL.RETINANET.NORM,\r\n \"num_anchors\": num_anchors,\r\n }\r\n\r\n def forward(self, features: List[Tensor]):\r\n \"\"\"\r\n Arguments:\r\n features (list[Tensor]): FPN feature map tensors in high to low resolution.\r\n Each tensor in the list correspond to different feature levels.\r\n\r\n Returns:\r\n logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).\r\n The tensor predicts the classification probability\r\n at each spatial position for each of the A anchors and K object\r\n classes.\r\n bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi).\r\n The tensor predicts 4-vector (dx,dy,dw,dh) box\r\n regression values for every anchor. These values are the\r\n relative offset between the anchor and the ground truth box.\r\n \"\"\"\r\n assert len(features) == self._num_features\r\n logits = []\r\n bbox_reg = []\r\n for feature in features:\r\n logits.append(self.cls_score(self.cls_subnet(feature)))\r\n bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature)))\r\n return logits, bbox_reg\r" } ]
import logging
import torch
from typing import List, Optional, Tuple
from fvcore.nn import sigmoid_focal_loss_jit
from torch import nn
from torch.nn import functional as F
from annotator.oneformer.detectron2.layers import ShapeSpec, batched_nms
from annotator.oneformer.detectron2.structures import Boxes, ImageList, Instances, pairwise_point_box_distance
from annotator.oneformer.detectron2.utils.events import get_event_storage
from ..anchor_generator import DefaultAnchorGenerator
from ..backbone import Backbone
from ..box_regression import Box2BoxTransformLinear, _dense_box_regression_loss
from .dense_detector import DenseDetector
from .retinanet import RetinaNetHead
13,829
# Copyright (c) Facebook, Inc. and its affiliates.

__all__ = ["FCOS"]

logger = logging.getLogger(__name__)


class FCOS(DenseDetector):
    """
    Implement FCOS in :paper:`fcos`.
    """

    def __init__(
        self,
        *,
# Copyright (c) Facebook, Inc. and its affiliates.

__all__ = ["FCOS"]

logger = logging.getLogger(__name__)


class FCOS(DenseDetector):
    """
    Implement FCOS in :paper:`fcos`.
    """

    def __init__(
        self,
        *,
backbone: Backbone,
8
2023-12-05 02:51:53+00:00
16k
u2seg/U2Seg
detectron2/data/build.py
[ { "identifier": "configurable", "path": "detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\n \"\"\"\n Decorate a function or a class's __init__ method so that it can be called\n with a :class:`CfgNode` object using a :func:`from_config` function that translates\n :class:`CfgNode` to arguments.\n\n Examples:\n ::\n # Usage 1: Decorator on __init__:\n class A:\n @configurable\n def __init__(self, a, b=2, c=3):\n pass\n\n @classmethod\n def from_config(cls, cfg): # 'cfg' must be the first argument\n # Returns kwargs to be passed to __init__\n return {\"a\": cfg.A, \"b\": cfg.B}\n\n a1 = A(a=1, b=2) # regular construction\n a2 = A(cfg) # construct with a cfg\n a3 = A(cfg, b=3, c=4) # construct with extra overwrite\n\n # Usage 2: Decorator on any function. Needs an extra from_config argument:\n @configurable(from_config=lambda cfg: {\"a: cfg.A, \"b\": cfg.B})\n def a_func(a, b=2, c=3):\n pass\n\n a1 = a_func(a=1, b=2) # regular call\n a2 = a_func(cfg) # call with a cfg\n a3 = a_func(cfg, b=3, c=4) # call with extra overwrite\n\n Args:\n init_func (callable): a class's ``__init__`` method in usage 1. The\n class must have a ``from_config`` classmethod which takes `cfg` as\n the first argument.\n from_config (callable): the from_config function in usage 2. It must take `cfg`\n as its first argument.\n \"\"\"\n\n if init_func is not None:\n assert (\n inspect.isfunction(init_func)\n and from_config is None\n and init_func.__name__ == \"__init__\"\n ), \"Incorrect use of @configurable. Check API documentation for examples.\"\n\n @functools.wraps(init_func)\n def wrapped(self, *args, **kwargs):\n try:\n from_config_func = type(self).from_config\n except AttributeError as e:\n raise AttributeError(\n \"Class with @configurable must have a 'from_config' classmethod.\"\n ) from e\n if not inspect.ismethod(from_config_func):\n raise TypeError(\"Class with @configurable must have a 'from_config' classmethod.\")\n\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)\n init_func(self, **explicit_args)\n else:\n init_func(self, *args, **kwargs)\n\n return wrapped\n\n else:\n if from_config is None:\n return configurable # @configurable() is made equivalent to @configurable\n assert inspect.isfunction(\n from_config\n ), \"from_config argument of configurable must be a function!\"\n\n def wrapper(orig_func):\n @functools.wraps(orig_func)\n def wrapped(*args, **kwargs):\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config, *args, **kwargs)\n return orig_func(**explicit_args)\n else:\n return orig_func(*args, **kwargs)\n\n wrapped.from_config = from_config\n return wrapped\n\n return wrapper" }, { "identifier": "BoxMode", "path": "detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. 
They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr" }, { "identifier": "get_world_size", "path": "detectron2/utils/comm.py", "snippet": "def get_world_size() -> int:\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "seed_all_rng", "path": "detectron2/utils/env.py", "snippet": "def seed_all_rng(seed=None):\n \"\"\"\n Set the random seed for the RNG in torch, numpy and python.\n\n Args:\n seed (int): if None, will use a strong random seed.\n \"\"\"\n if seed is None:\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n logger = logging.getLogger(__name__)\n logger.info(\"Using a generated random seed {}\".format(seed))\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)" 
}, { "identifier": "PathManager", "path": "detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "_log_api_usage", "path": "detectron2/utils/logger.py", "snippet": "def _log_api_usage(identifier: str):\n \"\"\"\n Internal function used to log the usage of different detectron2 components\n inside facebook's infra.\n \"\"\"\n torch._C._log_api_usage_once(\"detectron2.\" + identifier)" }, { "identifier": "log_first_n", "path": "detectron2/utils/logger.py", "snippet": "def log_first_n(lvl, msg, n=1, *, name=None, key=\"caller\"):\n \"\"\"\n Log only for the first n times.\n\n Args:\n lvl (int): the logging level\n msg (str):\n n (int):\n name (str): name of the logger to use. Will use the caller's module by default.\n key (str or tuple[str]): the string(s) can be one of \"caller\" or\n \"message\", which defines how to identify duplicated logs.\n For example, if called with `n=1, key=\"caller\"`, this function\n will only log the first call from the same caller, regardless of\n the message content.\n If called with `n=1, key=\"message\"`, this function will log the\n same content only once, even if they are called from different places.\n If called with `n=1, key=(\"caller\", \"message\")`, this function\n will not log only if the same caller has logged the same message before.\n \"\"\"\n if isinstance(key, str):\n key = (key,)\n assert len(key) > 0\n\n caller_module, caller_key = _find_caller()\n hash_key = ()\n if \"caller\" in key:\n hash_key = hash_key + caller_key\n if \"message\" in key:\n hash_key = hash_key + (msg,)\n\n _LOG_COUNTER[hash_key] += 1\n if _LOG_COUNTER[hash_key] <= n:\n logging.getLogger(name or caller_module).log(lvl, msg)" }, { "identifier": "DatasetCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" }, { "identifier": "AspectRatioGroupedDataset", "path": "detectron2/data/common.py", "snippet": "class AspectRatioGroupedDataset(data.IterableDataset):\n \"\"\"\n Batch data that have similar aspect ratio together.\n In this implementation, images whose aspect ratio < (or >) 1 will\n be batched together.\n This improves training speed because the images then need less padding\n to form a batch.\n\n It assumes the underlying dataset produces dicts with \"width\" and \"height\" keys.\n It will then produce a list of original dicts with length = batch_size,\n all with similar aspect ratios.\n \"\"\"\n\n def __init__(self, dataset, batch_size):\n \"\"\"\n Args:\n dataset: an iterable. 
Each element must be a dict with keys\n \"width\" and \"height\", which will be used to batch data.\n batch_size (int):\n \"\"\"\n self.dataset = dataset\n self.batch_size = batch_size\n self._buckets = [[] for _ in range(2)]\n # Hard-coded two aspect ratio groups: w > h and w < h.\n # Can add support for more aspect ratio groups, but doesn't seem useful\n\n def __iter__(self):\n for d in self.dataset:\n w, h = d[\"width\"], d[\"height\"]\n bucket_id = 0 if w > h else 1\n bucket = self._buckets[bucket_id]\n bucket.append(d)\n if len(bucket) == self.batch_size:\n data = bucket[:]\n # Clear bucket first, because code after yield is not\n # guaranteed to execute\n del bucket[:]\n yield data" }, { "identifier": "DatasetFromList", "path": "detectron2/data/common.py", "snippet": "class DatasetFromList(data.Dataset):\n \"\"\"\n Wrap a list to a torch Dataset. It produces elements of the list as data.\n \"\"\"\n\n def __init__(\n self,\n lst: list,\n copy: bool = True,\n serialize: Union[bool, Callable] = True,\n ):\n \"\"\"\n Args:\n lst (list): a list which contains elements to produce.\n copy (bool): whether to deepcopy the element when producing it,\n so that the result can be modified in place without affecting the\n source in the list.\n serialize (bool or callable): whether to serialize the stroage to other\n backend. If `True`, the default serialize method will be used, if given\n a callable, the callable will be used as serialize method.\n \"\"\"\n self._lst = lst\n self._copy = copy\n if not isinstance(serialize, (bool, Callable)):\n raise TypeError(f\"Unsupported type for argument `serailzie`: {serialize}\")\n self._serialize = serialize is not False\n\n if self._serialize:\n serialize_method = (\n serialize\n if isinstance(serialize, Callable)\n else _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD\n )\n logger.info(f\"Serializing the dataset using: {serialize_method}\")\n self._lst = serialize_method(self._lst)\n\n def __len__(self):\n return len(self._lst)\n\n def __getitem__(self, idx):\n if self._copy and not self._serialize:\n return copy.deepcopy(self._lst[idx])\n else:\n return self._lst[idx]" }, { "identifier": "MapDataset", "path": "detectron2/data/common.py", "snippet": "class MapDataset(data.Dataset):\n \"\"\"\n Map a function over the elements in a dataset.\n \"\"\"\n\n def __init__(self, dataset, map_func):\n \"\"\"\n Args:\n dataset: a dataset where map function is applied. Can be either\n map-style or iterable dataset. When given an iterable dataset,\n the returned object will also be an iterable dataset.\n map_func: a callable which maps the element in dataset. map_func can\n return None to skip the data (e.g. 
in case of errors).\n How None is handled depends on the style of `dataset`.\n If `dataset` is map-style, it randomly tries other elements.\n If `dataset` is iterable, it skips the data and tries the next.\n \"\"\"\n self._dataset = dataset\n self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work\n\n self._rng = random.Random(42)\n self._fallback_candidates = set(range(len(dataset)))\n\n def __new__(cls, dataset, map_func):\n is_iterable = isinstance(dataset, data.IterableDataset)\n if is_iterable:\n return _MapIterableDataset(dataset, map_func)\n else:\n return super().__new__(cls)\n\n def __getnewargs__(self):\n return self._dataset, self._map_func\n\n def __len__(self):\n return len(self._dataset)\n\n def __getitem__(self, idx):\n retry_count = 0\n cur_idx = int(idx)\n\n while True:\n data = self._map_func(self._dataset[cur_idx])\n if data is not None:\n self._fallback_candidates.add(cur_idx)\n return data\n\n # _map_func fails for this idx, use a random new index from the pool\n retry_count += 1\n self._fallback_candidates.discard(cur_idx)\n cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]\n\n if retry_count >= 3:\n logger = logging.getLogger(__name__)\n logger.warning(\n \"Failed to apply `_map_func` for idx: {}, retry count: {}\".format(\n idx, retry_count\n )\n )" }, { "identifier": "ToIterableDataset", "path": "detectron2/data/common.py", "snippet": "class ToIterableDataset(data.IterableDataset):\n \"\"\"\n Convert an old indices-based (also called map-style) dataset\n to an iterable-style dataset.\n \"\"\"\n\n def __init__(\n self,\n dataset: data.Dataset,\n sampler: Sampler,\n shard_sampler: bool = True,\n shard_chunk_size: int = 1,\n ):\n \"\"\"\n Args:\n dataset: an old-style dataset with ``__getitem__``\n sampler: a cheap iterable that produces indices to be applied on ``dataset``.\n shard_sampler: whether to shard the sampler based on the current pytorch data loader\n worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple\n workers, it is responsible for sharding its data based on worker id so that workers\n don't produce identical data.\n\n Most samplers (like our TrainingSampler) do not shard based on dataloader worker id\n and this argument should be set to True. But certain samplers may be already\n sharded, in that case this argument should be set to False.\n shard_chunk_size: when sharding the sampler, each worker will\n \"\"\"\n assert not isinstance(dataset, data.IterableDataset), dataset\n assert isinstance(sampler, Sampler), sampler\n self.dataset = dataset\n self.sampler = sampler\n self.shard_sampler = shard_sampler\n self.shard_chunk_size = shard_chunk_size\n\n def __iter__(self):\n if not self.shard_sampler:\n sampler = self.sampler\n else:\n # With map-style dataset, `DataLoader(dataset, sampler)` runs the\n # sampler in main process only. But `DataLoader(ToIterableDataset(dataset, sampler))`\n # will run sampler in every of the N worker. So we should only keep 1/N of the ids on\n # each worker. 
The assumption is that sampler is cheap to iterate so it's fine to\n # discard ids in workers.\n sampler = _shard_iterator_dataloader_worker(self.sampler, self.shard_chunk_size)\n for idx in sampler:\n yield self.dataset[idx]\n\n def __len__(self):\n return len(self.sampler)" }, { "identifier": "DatasetMapper", "path": "detectron2/data/dataset_mapper.py", "snippet": "class DatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by the model.\n\n This is the default callable to be used to map your dataset dict into training data.\n You may need to follow it to implement your own one for customized logic,\n such as a different way to read or transform images.\n See :doc:`/tutorials/data_loading` for details.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies cropping/geometric transforms to the image and annotations\n 3. Prepare data and annotations to Tensor and :class:`Instances`\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n use_instance_mask: bool = False,\n use_keypoint: bool = False,\n instance_mask_format: str = \"polygon\",\n keypoint_hflip_indices: Optional[np.ndarray] = None,\n precomputed_proposal_topk: Optional[int] = None,\n recompute_boxes: bool = False,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n use_instance_mask: whether to process instance segmentation annotations, if available\n use_keypoint: whether to process keypoint annotations if available\n instance_mask_format: one of \"polygon\" or \"bitmask\". 
Process instance segmentation\n masks into this format.\n keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`\n precomputed_proposal_topk: if given, will load pre-computed\n proposals from dataset_dict and keep the top k proposals for each image.\n recompute_boxes: whether to overwrite bounding box annotations\n by computing tight bounding boxes from instance mask annotations.\n \"\"\"\n if recompute_boxes:\n assert use_instance_mask, \"recompute_boxes requires instance masks\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = T.AugmentationList(augmentations)\n self.image_format = image_format\n self.use_instance_mask = use_instance_mask\n self.instance_mask_format = instance_mask_format\n self.use_keypoint = use_keypoint\n self.keypoint_hflip_indices = keypoint_hflip_indices\n self.proposal_topk = precomputed_proposal_topk\n self.recompute_boxes = recompute_boxes\n # fmt: on\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True):\n augs = utils.build_augmentation(cfg, is_train)\n if cfg.INPUT.CROP.ENABLED and is_train:\n augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))\n recompute_boxes = cfg.MODEL.MASK_ON\n else:\n recompute_boxes = False\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"use_instance_mask\": cfg.MODEL.MASK_ON,\n \"instance_mask_format\": cfg.INPUT.MASK_FORMAT,\n \"use_keypoint\": cfg.MODEL.KEYPOINT_ON,\n \"recompute_boxes\": recompute_boxes,\n }\n\n if cfg.MODEL.KEYPOINT_ON:\n ret[\"keypoint_hflip_indices\"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)\n\n if cfg.MODEL.LOAD_PROPOSALS:\n ret[\"precomputed_proposal_topk\"] = (\n cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN\n if is_train\n else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST\n )\n return ret\n\n def _transform_annotations(self, dataset_dict, transforms, image_shape):\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n if not self.use_instance_mask:\n anno.pop(\"segmentation\", None)\n if not self.use_keypoint:\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(\n obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices\n )\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n instances = utils.annotations_to_instances(\n annos, image_shape, mask_format=self.instance_mask_format\n )\n\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n if self.recompute_boxes:\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n dataset_dict[\"instances\"] = utils.filter_empty_instances(instances)\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n # USER: Write your own image loading if it's not from a file\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.image_format)\n utils.check_image_size(dataset_dict, image)\n\n # USER: Remove if you don't do semantic/panoptic segmentation.\n if \"sem_seg_file_name\" in dataset_dict:\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\"), \"L\").squeeze(2)\n else:\n sem_seg_gt = None\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n transforms = self.augmentations(aug_input)\n image, sem_seg_gt = aug_input.image, aug_input.sem_seg\n\n image_shape = image.shape[:2] # h, w\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n # USER: Remove if you don't use pre-computed proposals.\n # Most users would not need this feature.\n if self.proposal_topk is not None:\n utils.transform_proposals(\n dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk\n )\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n dataset_dict.pop(\"sem_seg_file_name\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n self._transform_annotations(dataset_dict, transforms, image_shape)\n\n return dataset_dict" }, { "identifier": "check_metadata_consistency", "path": "detectron2/data/detection_utils.py", "snippet": "def check_metadata_consistency(key, dataset_names):\n \"\"\"\n Check that the datasets have consistent metadata.\n\n Args:\n key (str): a metadata key\n dataset_names (list[str]): a list of dataset names\n\n Raises:\n AttributeError: if the key does not exist in the metadata\n ValueError: if the given datasets do not have the same metadata values defined by key\n \"\"\"\n if len(dataset_names) == 0:\n return\n logger = logging.getLogger(__name__)\n entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]\n for idx, entry in enumerate(entries_per_dataset):\n if entry != entries_per_dataset[0]:\n logger.error(\n \"Metadata '{}' for dataset '{}' is '{}'\".format(key, dataset_names[idx], str(entry))\n )\n logger.error(\n \"Metadata '{}' for dataset '{}' is '{}'\".format(\n key, dataset_names[0], str(entries_per_dataset[0])\n )\n )\n raise ValueError(\"Datasets have different metadata '{}'!\".format(key))" }, { "identifier": "InferenceSampler", "path": "detectron2/data/samplers/distributed_sampler.py", "snippet": "class InferenceSampler(Sampler):\n \"\"\"\n Produce indices for inference across all workers.\n Inference needs to run on the __exact__ set of samples,\n 
therefore when the total number of samples is not divisible by the number of workers,\n this sampler produces different number of samples on different workers.\n \"\"\"\n\n def __init__(self, size: int):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n \"\"\"\n self._size = size\n assert size > 0\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n self._local_indices = self._get_local_indices(size, self._world_size, self._rank)\n\n @staticmethod\n def _get_local_indices(total_size, world_size, rank):\n shard_size = total_size // world_size\n left = total_size % world_size\n shard_sizes = [shard_size + int(r < left) for r in range(world_size)]\n\n begin = sum(shard_sizes[:rank])\n end = min(sum(shard_sizes[: rank + 1]), total_size)\n return range(begin, end)\n\n def __iter__(self):\n yield from self._local_indices\n\n def __len__(self):\n return len(self._local_indices)" }, { "identifier": "RandomSubsetTrainingSampler", "path": "detectron2/data/samplers/distributed_sampler.py", "snippet": "class RandomSubsetTrainingSampler(TrainingSampler):\n \"\"\"\n Similar to TrainingSampler, but only sample a random subset of indices.\n This is useful when you want to estimate the accuracy vs data-number curves by\n training the model with different subset_ratio.\n \"\"\"\n\n def __init__(\n self,\n size: int,\n subset_ratio: float,\n shuffle: bool = True,\n seed_shuffle: Optional[int] = None,\n seed_subset: Optional[int] = None,\n ):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n subset_ratio (float): the ratio of subset data to sample from the underlying dataset\n shuffle (bool): whether to shuffle the indices or not\n seed_shuffle (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n seed_subset (int): the seed to randomize the subset to be sampled.\n Must be the same across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n super().__init__(size=size, shuffle=shuffle, seed=seed_shuffle)\n\n assert 0.0 < subset_ratio <= 1.0\n self._size_subset = int(size * subset_ratio)\n assert self._size_subset > 0\n if seed_subset is None:\n seed_subset = comm.shared_random_seed()\n self._seed_subset = int(seed_subset)\n\n # randomly generate the subset indexes to be sampled from\n g = torch.Generator()\n g.manual_seed(self._seed_subset)\n indexes_randperm = torch.randperm(self._size, generator=g)\n self._indexes_subset = indexes_randperm[: self._size_subset]\n\n logger.info(\"Using RandomSubsetTrainingSampler......\")\n logger.info(f\"Randomly sample {self._size_subset} data from the original {self._size} data\")\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed) # self._seed equals seed_shuffle from __init__()\n while True:\n if self._shuffle:\n # generate a random permutation to shuffle self._indexes_subset\n randperm = torch.randperm(self._size_subset, generator=g)\n yield from self._indexes_subset[randperm].tolist()\n else:\n yield from self._indexes_subset.tolist()" }, { "identifier": "RepeatFactorTrainingSampler", "path": "detectron2/data/samplers/distributed_sampler.py", "snippet": "class RepeatFactorTrainingSampler(Sampler):\n \"\"\"\n Similar to TrainingSampler, but a sample may appear more times than others based\n on its \"repeat factor\". 
This is suitable for training on class imbalanced datasets like LVIS.\n \"\"\"\n\n def __init__(self, repeat_factors, *, shuffle=True, seed=None):\n \"\"\"\n Args:\n repeat_factors (Tensor): a float vector, the repeat factor for each indice. When it's\n full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.\n shuffle (bool): whether to shuffle the indices or not\n seed (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n self._shuffle = shuffle\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n # Split into whole number (_int_part) and fractional (_frac_part) parts.\n self._int_part = torch.trunc(repeat_factors)\n self._frac_part = repeat_factors - self._int_part\n\n @staticmethod\n def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):\n \"\"\"\n Compute (fractional) per-image repeat factors based on category frequency.\n The repeat factor for an image is a function of the frequency of the rarest\n category labeled in that image. The \"frequency of category c\" in [0, 1] is defined\n as the fraction of images in the training set (without repeats) in which category c\n appears.\n See :paper:`lvis` (>= v2) Appendix B.2.\n\n Args:\n dataset_dicts (list[dict]): annotations in Detectron2 dataset format.\n repeat_thresh (float): frequency threshold below which data is repeated.\n If the frequency is half of `repeat_thresh`, the image will be\n repeated twice.\n\n Returns:\n torch.Tensor:\n the i-th element is the repeat factor for the dataset image at index i.\n \"\"\"\n # 1. For each category c, compute the fraction of images that contain it: f(c)\n category_freq = defaultdict(int)\n for dataset_dict in dataset_dicts: # For each image (without repeats)\n cat_ids = {ann[\"category_id\"] for ann in dataset_dict[\"annotations\"]}\n for cat_id in cat_ids:\n category_freq[cat_id] += 1\n num_images = len(dataset_dicts)\n for k, v in category_freq.items():\n category_freq[k] = v / num_images\n\n # 2. For each category c, compute the category-level repeat factor:\n # r(c) = max(1, sqrt(t / f(c)))\n category_rep = {\n cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))\n for cat_id, cat_freq in category_freq.items()\n }\n\n # 3. For each image I, compute the image-level repeat factor:\n # r(I) = max_{c in I} r(c)\n rep_factors = []\n for dataset_dict in dataset_dicts:\n cat_ids = {ann[\"category_id\"] for ann in dataset_dict[\"annotations\"]}\n rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)\n rep_factors.append(rep_factor)\n\n return torch.tensor(rep_factors, dtype=torch.float32)\n\n def _get_epoch_indices(self, generator):\n \"\"\"\n Create a list of dataset indices (with repeats) to use for one epoch.\n\n Args:\n generator (torch.Generator): pseudo random number generator used for\n stochastic rounding.\n\n Returns:\n torch.Tensor: list of dataset indices to use in one epoch. 
Each index\n is repeated based on its calculated repeat factor.\n \"\"\"\n # Since repeat factors are fractional, we use stochastic rounding so\n # that the target repeat factor is achieved in expectation over the\n # course of training\n rands = torch.rand(len(self._frac_part), generator=generator)\n rep_factors = self._int_part + (rands < self._frac_part).float()\n # Construct a list of indices in which we repeat images as specified\n indices = []\n for dataset_index, rep_factor in enumerate(rep_factors):\n indices.extend([dataset_index] * int(rep_factor.item()))\n return torch.tensor(indices, dtype=torch.int64)\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed)\n while True:\n # Sample indices with repeats determined by stochastic rounding; each\n # \"epoch\" may have a slightly different size due to the rounding.\n indices = self._get_epoch_indices(g)\n if self._shuffle:\n randperm = torch.randperm(len(indices), generator=g)\n yield from indices[randperm].tolist()\n else:\n yield from indices.tolist()" }, { "identifier": "TrainingSampler", "path": "detectron2/data/samplers/distributed_sampler.py", "snippet": "class TrainingSampler(Sampler):\n \"\"\"\n In training, we only care about the \"infinite stream\" of training data.\n So this sampler produces an infinite stream of indices and\n all workers cooperate to correctly shuffle the indices and sample different indices.\n\n The samplers in each worker effectively produces `indices[worker_id::num_workers]`\n where `indices` is an infinite stream of indices consisting of\n `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)\n or `range(size) + range(size) + ...` (if shuffle is False)\n\n Note that this sampler does not shard based on pytorch DataLoader worker id.\n A sampler passed to pytorch DataLoader is used only with map-style dataset\n and will not be executed inside workers.\n But if this sampler is used in a way that it gets execute inside a dataloader\n worker, then extra work needs to be done to shard its outputs based on worker id.\n This is required so that workers don't produce identical data.\n :class:`ToIterableDataset` implements this logic.\n This note is true for all samplers in detectron2.\n \"\"\"\n\n def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n shuffle (bool): whether to shuffle the indices or not\n seed (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n if not isinstance(size, int):\n raise TypeError(f\"TrainingSampler(size=) expects an int. Got type {type(size)}.\")\n if size <= 0:\n raise ValueError(f\"TrainingSampler(size=) expects a positive int. 
Got {size}.\")\n self._size = size\n self._shuffle = shuffle\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed)\n while True:\n if self._shuffle:\n yield from torch.randperm(self._size, generator=g).tolist()\n else:\n yield from torch.arange(self._size).tolist()" } ]
import itertools import logging import numpy as np import operator import pickle import torch import torch.utils.data as torchdata from collections import OrderedDict, defaultdict from typing import Any, Callable, Dict, List, Optional, Union from tabulate import tabulate from termcolor import colored from detectron2.config import configurable from detectron2.structures import BoxMode from detectron2.utils.comm import get_world_size from detectron2.utils.env import seed_all_rng from detectron2.utils.file_io import PathManager from detectron2.utils.logger import _log_api_usage, log_first_n from .catalog import DatasetCatalog, MetadataCatalog from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset from .dataset_mapper import DatasetMapper from .detection_utils import check_metadata_consistency from .samplers import ( InferenceSampler, RandomSubsetTrainingSampler, RepeatFactorTrainingSampler, TrainingSampler, )
12,599
logger.info("Making batched data loader with batch_size=%d", batch_size) if isinstance(dataset, torchdata.IterableDataset): assert sampler is None, "sampler must be None if dataset is IterableDataset" else: dataset = ToIterableDataset(dataset, sampler, shard_chunk_size=batch_size) if aspect_ratio_grouping: assert drop_last, "Aspect ratio grouping will drop incomplete batches." data_loader = torchdata.DataLoader( dataset, num_workers=num_workers, collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements worker_init_fn=worker_init_reset_seed, **kwargs ) # yield individual mapped dict data_loader = AspectRatioGroupedDataset(data_loader, batch_size) if collate_fn is None: return data_loader return MapDataset(data_loader, collate_fn) else: return torchdata.DataLoader( dataset, batch_size=batch_size, drop_last=drop_last, num_workers=num_workers, collate_fn=trivial_batch_collator if collate_fn is None else collate_fn, worker_init_fn=worker_init_reset_seed, **kwargs ) def _get_train_datasets_repeat_factors(cfg) -> Dict[str, float]: repeat_factors = cfg.DATASETS.TRAIN_REPEAT_FACTOR assert all(len(tup) == 2 for tup in repeat_factors) name_to_weight = defaultdict(lambda: 1, dict(repeat_factors)) # The sampling weights map should only contain datasets in train config unrecognized = set(name_to_weight.keys()) - set(cfg.DATASETS.TRAIN) assert not unrecognized, f"unrecognized datasets: {unrecognized}" logger = logging.getLogger(__name__) logger.info(f"Found repeat factors: {list(name_to_weight.items())}") # pyre-fixme[7]: Expected `Dict[str, float]` but got `DefaultDict[typing.Any, int]`. return name_to_weight def _build_weighted_sampler(cfg, enable_category_balance=False): dataset_repeat_factors = _get_train_datasets_repeat_factors(cfg) # OrderedDict to guarantee order of values() consistent with repeat factors dataset_name_to_dicts = OrderedDict( { name: get_detection_dataset_dicts( [name], filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) for name in cfg.DATASETS.TRAIN } ) # Repeat factor for every sample in the dataset repeat_factors = [ [dataset_repeat_factors[dsname]] * len(dataset_name_to_dicts[dsname]) for dsname in cfg.DATASETS.TRAIN ] repeat_factors = list(itertools.chain.from_iterable(repeat_factors)) repeat_factors = torch.tensor(repeat_factors) logger = logging.getLogger(__name__) if enable_category_balance: """ 1. Calculate repeat factors using category frequency for each dataset and then merge them. 2. Element wise dot producting the dataset frequency repeat factors with the category frequency repeat factors gives the final repeat factors. 
""" category_repeat_factors = [ RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( dataset_dict, cfg.DATALOADER.REPEAT_THRESHOLD ) for dataset_dict in dataset_name_to_dicts.values() ] # flatten the category repeat factors from all datasets category_repeat_factors = list(itertools.chain.from_iterable(category_repeat_factors)) category_repeat_factors = torch.tensor(category_repeat_factors) repeat_factors = torch.mul(category_repeat_factors, repeat_factors) repeat_factors = repeat_factors / torch.min(repeat_factors) logger.info( "Using WeightedCategoryTrainingSampler with repeat_factors={}".format( cfg.DATASETS.TRAIN_REPEAT_FACTOR ) ) else: logger.info( "Using WeightedTrainingSampler with repeat_factors={}".format( cfg.DATASETS.TRAIN_REPEAT_FACTOR ) ) sampler = RepeatFactorTrainingSampler(repeat_factors) return sampler def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): if dataset is None: dataset = get_detection_dataset_dicts( cfg.DATASETS.TRAIN, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0]) if mapper is None:
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains the default logic to build a dataloader for training or testing. """ __all__ = [ "build_batch_data_loader", "build_detection_train_loader", "build_detection_test_loader", "get_detection_dataset_dicts", "load_proposals_into_dataset", "print_instances_class_histogram", ] def filter_images_with_only_crowd_annotations(dataset_dicts): """ Filter out images with none annotations or only crowd annotations (i.e., images without non-crowd annotations). A common training-time preprocessing on COCO dataset. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format, but filtered. """ num_before = len(dataset_dicts) def valid(anns): for ann in anns: if ann.get("iscrowd", 0) == 0: return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with no usable annotations. {} images left.".format( num_before - num_after, num_after ) ) return dataset_dicts def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): """ Filter out images with too few number of keypoints. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format as dataset_dicts, but filtered. """ num_before = len(dataset_dicts) def visible_keypoints_in_image(dic): # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility annotations = dic["annotations"] return sum( (np.array(ann["keypoints"][2::3]) > 0).sum() for ann in annotations if "keypoints" in ann ) dataset_dicts = [ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image ] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with fewer than {} keypoints.".format( num_before - num_after, min_keypoints_per_image ) ) return dataset_dicts def load_proposals_into_dataset(dataset_dicts, proposal_file): """ Load precomputed object proposals into the dataset. The proposal file should be a pickled dict with the following keys: - "ids": list[int] or list[str], the image ids - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores corresponding to the boxes. - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file)) with PathManager.open(proposal_file, "rb") as f: proposals = pickle.load(f, encoding="latin1") # Rename the key names in D1 proposal files rename_keys = {"indexes": "ids", "scores": "objectness_logits"} for key in rename_keys: if key in proposals: proposals[rename_keys[key]] = proposals.pop(key) # Fetch the indexes of all proposals that are in the dataset # Convert image_id to str since they could be int. 
img_ids = set({str(record["image_id"]) for record in dataset_dicts}) id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids} # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS' bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS for record in dataset_dicts: # Get the index of the proposal i = id_to_index[str(record["image_id"])] boxes = proposals["boxes"][i] objectness_logits = proposals["objectness_logits"][i] # Sort the proposals in descending order of the scores inds = objectness_logits.argsort()[::-1] record["proposal_boxes"] = boxes[inds] record["proposal_objectness_logits"] = objectness_logits[inds] record["proposal_bbox_mode"] = bbox_mode return dataset_dicts def print_instances_class_histogram(dataset_dicts, class_names): """ Args: dataset_dicts (list[dict]): list of dataset dicts. class_names (list[str]): list of class names (zero-indexed). """ num_classes = len(class_names) hist_bins = np.arange(num_classes + 1) histogram = np.zeros((num_classes,), dtype=int) for entry in dataset_dicts: annos = entry["annotations"] classes = np.asarray( [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=int ) if len(classes): assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}" assert ( classes.max() < num_classes ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes" histogram += np.histogram(classes, bins=hist_bins)[0] N_COLS = min(6, len(class_names) * 2) def short_name(x): # make long class names shorter. useful for lvis if len(x) > 13: return x[:11] + ".." return x data = list( itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) ) total_num_instances = sum(data[1::2]) data.extend([None] * (N_COLS - (len(data) % N_COLS))) if num_classes > 1: data.extend(["total", total_num_instances]) data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) table = tabulate( data, headers=["category", "#instances"] * (N_COLS // 2), tablefmt="pipe", numalign="left", stralign="center", ) log_first_n( logging.INFO, "Distribution of instances among all {} categories:\n".format(num_classes) + colored(table, "cyan"), key="message", ) def get_detection_dataset_dicts( names, filter_empty=True, min_keypoints=0, proposal_files=None, check_consistency=True, ): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. check_consistency (bool): whether to check if datasets have consistent metadata. Returns: list[dict]: a list of dicts following the standard dataset dict format. """ if isinstance(names, str): names = [names] assert len(names), names available_datasets = DatasetCatalog.keys() names_set = set(names) if not names_set.issubset(available_datasets): logger = logging.getLogger(__name__) logger.warning( "The following dataset names are not registered in the DatasetCatalog: " f"{names_set - available_datasets}. 
" f"Available datasets are {available_datasets}" ) dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names] if isinstance(dataset_dicts[0], torchdata.Dataset): if len(dataset_dicts) > 1: # ConcatDataset does not work for iterable style dataset. # We could support concat for iterable as well, but it's often # not a good idea to concat iterables anyway. return torchdata.ConcatDataset(dataset_dicts) return dataset_dicts[0] for dataset_name, dicts in zip(names, dataset_dicts): assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) if proposal_files is not None: assert len(names) == len(proposal_files) # load precomputed proposals from proposal files dataset_dicts = [ load_proposals_into_dataset(dataset_i_dicts, proposal_file) for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) ] dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) has_instances = "annotations" in dataset_dicts[0] if filter_empty and has_instances: dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) if min_keypoints > 0 and has_instances: dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) if check_consistency and has_instances: try: class_names = MetadataCatalog.get(names[0]).thing_classes check_metadata_consistency("thing_classes", names) print_instances_class_histogram(dataset_dicts, class_names) except AttributeError: # class names are not available for this dataset pass assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names)) return dataset_dicts def build_batch_data_loader( dataset, sampler, total_batch_size, *, aspect_ratio_grouping=False, num_workers=0, collate_fn=None, drop_last: bool = True, **kwargs, ): """ Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are: 1. support aspect ratio grouping options 2. use no "batch collation", because this is common for detection training Args: dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset. sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices. Must be provided iff. ``dataset`` is a map-style dataset. total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see :func:`build_detection_train_loader`. drop_last (bool): if ``True``, the dataloader will drop incomplete batches. Returns: iterable[list]. Length of each list is the batch size of the current GPU. Each element in the list comes from the dataset. """ world_size = get_world_size() assert ( total_batch_size > 0 and total_batch_size % world_size == 0 ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format( total_batch_size, world_size ) batch_size = total_batch_size // world_size logger = logging.getLogger(__name__) logger.info("Making batched data loader with batch_size=%d", batch_size) if isinstance(dataset, torchdata.IterableDataset): assert sampler is None, "sampler must be None if dataset is IterableDataset" else: dataset = ToIterableDataset(dataset, sampler, shard_chunk_size=batch_size) if aspect_ratio_grouping: assert drop_last, "Aspect ratio grouping will drop incomplete batches." 
data_loader = torchdata.DataLoader( dataset, num_workers=num_workers, collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements worker_init_fn=worker_init_reset_seed, **kwargs ) # yield individual mapped dict data_loader = AspectRatioGroupedDataset(data_loader, batch_size) if collate_fn is None: return data_loader return MapDataset(data_loader, collate_fn) else: return torchdata.DataLoader( dataset, batch_size=batch_size, drop_last=drop_last, num_workers=num_workers, collate_fn=trivial_batch_collator if collate_fn is None else collate_fn, worker_init_fn=worker_init_reset_seed, **kwargs ) def _get_train_datasets_repeat_factors(cfg) -> Dict[str, float]: repeat_factors = cfg.DATASETS.TRAIN_REPEAT_FACTOR assert all(len(tup) == 2 for tup in repeat_factors) name_to_weight = defaultdict(lambda: 1, dict(repeat_factors)) # The sampling weights map should only contain datasets in train config unrecognized = set(name_to_weight.keys()) - set(cfg.DATASETS.TRAIN) assert not unrecognized, f"unrecognized datasets: {unrecognized}" logger = logging.getLogger(__name__) logger.info(f"Found repeat factors: {list(name_to_weight.items())}") # pyre-fixme[7]: Expected `Dict[str, float]` but got `DefaultDict[typing.Any, int]`. return name_to_weight def _build_weighted_sampler(cfg, enable_category_balance=False): dataset_repeat_factors = _get_train_datasets_repeat_factors(cfg) # OrderedDict to guarantee order of values() consistent with repeat factors dataset_name_to_dicts = OrderedDict( { name: get_detection_dataset_dicts( [name], filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) for name in cfg.DATASETS.TRAIN } ) # Repeat factor for every sample in the dataset repeat_factors = [ [dataset_repeat_factors[dsname]] * len(dataset_name_to_dicts[dsname]) for dsname in cfg.DATASETS.TRAIN ] repeat_factors = list(itertools.chain.from_iterable(repeat_factors)) repeat_factors = torch.tensor(repeat_factors) logger = logging.getLogger(__name__) if enable_category_balance: """ 1. Calculate repeat factors using category frequency for each dataset and then merge them. 2. Element wise dot producting the dataset frequency repeat factors with the category frequency repeat factors gives the final repeat factors. 
""" category_repeat_factors = [ RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( dataset_dict, cfg.DATALOADER.REPEAT_THRESHOLD ) for dataset_dict in dataset_name_to_dicts.values() ] # flatten the category repeat factors from all datasets category_repeat_factors = list(itertools.chain.from_iterable(category_repeat_factors)) category_repeat_factors = torch.tensor(category_repeat_factors) repeat_factors = torch.mul(category_repeat_factors, repeat_factors) repeat_factors = repeat_factors / torch.min(repeat_factors) logger.info( "Using WeightedCategoryTrainingSampler with repeat_factors={}".format( cfg.DATASETS.TRAIN_REPEAT_FACTOR ) ) else: logger.info( "Using WeightedTrainingSampler with repeat_factors={}".format( cfg.DATASETS.TRAIN_REPEAT_FACTOR ) ) sampler = RepeatFactorTrainingSampler(repeat_factors) return sampler def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): if dataset is None: dataset = get_detection_dataset_dicts( cfg.DATASETS.TRAIN, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0]) if mapper is None:
mapper = DatasetMapper(cfg, True)
12
2023-12-05 01:13:31+00:00
16k
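Each row in this split pairs cross-file context snippets with an in-file prefix (cropped_code) and the ground-truth next_line, as in the record above where the prefix ends at `if mapper is None:` and the target is `mapper = DatasetMapper(cfg, True)`. One plausible way to assemble a completion prompt from such a row (a sketch; the exact consumption logic is an assumption, not part of the dataset):

# Sketch: build (prompt, target) from one row; field names follow this dataset's columns.
def build_prompt(row):
    # Cross-file context snippets, then the imports and the in-file prefix that
    # ends right before the line to predict.
    context = "\n".join(item["snippet"] for item in row["context"])
    prompt = context + "\n" + row["import_statement"] + "\n" + row["cropped_code"]
    target = row["next_line"]  # e.g. "mapper = DatasetMapper(cfg, True)"
    return prompt, target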
upfusion3d/upfusion
control_net/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "control_net/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "control_net/ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "control_net/ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "control_net/ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "control_net/ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "control_net/ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "control_net/ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "control_net/ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "control_net/ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n 
self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "control_net/ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "control_net/ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "control_net/ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "control_net/ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "control_net/ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "control_net/ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "control_net/ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "control_net/ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n 
super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule,\n disable_tdqm=kwargs.get(\"disable_tdqm\", False),\n cfg_type=kwargs.get(\"cfg_type\", None)\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None, disable_tdqm=False, cfg_type=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps, disable=disable_tdqm)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n raise RuntimeError(\"not supported since this may mess up the new cfg logic\")\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim_v2(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold, cfg_type=cfg_type)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if (isinstance(c[k], list)) and (type(c[k][0]) is not PerspectiveCameras):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n elif (isinstance(c[k], list)) and ((type(c[k][0]) is PerspectiveCameras) or (c[k][0] is None)):\n c_in[k] = unconditional_conditioning[k] + c[k]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if 
self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def p_sample_ddim_v2(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None, cfg_type=None):\n # NOTE: v2 is a custom version so that modifications can be made more easily\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n if not isinstance(c, dict):\n raise RuntimeError(\"Not supported!\")\n\n # For cfg_type \"legacy\" or \"F1\"\n if isinstance(unconditional_conditioning, dict):\n c_in = dict()\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n for k in c:\n if (isinstance(c[k], list)) and (type(c[k][0]) is not PerspectiveCameras):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n elif (isinstance(c[k], list)) and ((type(c[k][0]) is PerspectiveCameras) or (c[k][0] is None)):\n c_in[k] = unconditional_conditioning[k] + c[k]\n elif (isinstance(c[k], torch.Tensor)):\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]], dim=0)\n else:\n raise RuntimeError(\"Not supported!\")\n\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n elif isinstance(unconditional_conditioning, list):\n raise ValueError\n\n else:\n raise RuntimeError(\"Not supported!\")\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, 
model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n raise RuntimeError(\"Function supported since the new cfg logic is not incorporated here\")\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(\n self, x_latent, cond, t_start, cfg_type=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None\n ):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # 
print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps, disable=True)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim_v2(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, \n cfg_type=cfg_type, unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning\n )\n if callback: callback(i)\n return x_dec" } ]
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from control_net.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from control_net.ldm.modules.ema import LitEma from control_net.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from control_net.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from control_net.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from control_net.ldm.models.diffusion.ddim import DDIMSampler
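The schedule helpers imported above from control_net.ldm.modules.diffusionmodules.util (in particular extract_into_tensor) are used throughout the code that follows to gather the per-timestep coefficient for each sample in a batch and reshape it so it broadcasts over the channel and spatial dimensions. A minimal sketch of such a helper is shown here; it illustrates the expected behaviour and is not a copy of the library function.

import torch

def extract_into_tensor(a, t, x_shape):
    # a: 1-D tensor of per-timestep coefficients (e.g. sqrt_alphas_cumprod)
    # t: (batch,) long tensor of timestep indices
    # x_shape: shape of the tensor the coefficient will be multiplied with
    b = t.shape[0]
    out = a.gather(-1, t)                                  # one coefficient per sample
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))    # shape (b, 1, 1, ..., 1)

For example, extract_into_tensor(sqrt_alphas_cumprod, t, x.shape) * x scales every sample in the batch by the coefficient belonging to its own timestep.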
13601
if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W 
diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if 
conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if 
isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def perform_one_step_denoise(self, batch): """ Returns the recovered image after one step denoising. NOTE: This is a custom function added by BR! """ x, c = self.get_input(batch, self.first_stage_key) t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() _, x0 = self.p_sample( x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=True, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None ) return x0, t def custom_forward(self, batch, **kwargs): """ Performs the usual forward pass but also returns the model output NOTE: This is a custom function added by BR! 
""" if self.parameterization != "eps": raise NotImplementedError x, c = self.get_input(batch, self.first_stage_key) t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() # Below code was taken and adapted from p_losses noise = torch.randn_like(x) x_noisy = self.q_sample(x_start=x, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, c) loss_simple = self.get_loss(model_output, noise, mean=False).mean([1, 2, 3]) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t loss = self.l_simple_weight * loss.mean() x_recon = self.predict_start_from_noise(x, t=t, noise=model_output) return loss, x_recon def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. 
""" batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... 
-> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
self.first_stage_model, IdentityFirstStage):
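The DDPM/LatentDiffusion code above supports three prediction targets ("eps", "x0" and "v"; see p_losses and get_v). The sketch below summarizes how the noisy input and each target are formed; it assumes sqrt_ac and sqrt_omac are the per-timestep sqrt(alphas_cumprod) and sqrt(1 - alphas_cumprod) coefficients already broadcast to the shape of x0 (e.g. via extract_into_tensor), and diffusion_targets is an illustrative name, not a method of the model class.

def diffusion_targets(x0, noise, sqrt_ac, sqrt_omac):
    # q_sample: forward diffusion of a clean latent x0 at a given timestep
    x_t = sqrt_ac * x0 + sqrt_omac * noise
    # the three supported training targets
    target_eps = noise                              # parameterization == "eps"
    target_x0 = x0                                  # parameterization == "x0"
    target_v = sqrt_ac * noise - sqrt_omac * x0     # parameterization == "v" (get_v)
    return x_t, {"eps": target_eps, "x0": target_x0, "v": target_v}

Recovering the clean latent from a v-prediction then follows predict_start_from_z_and_v above: pred_x0 = sqrt_ac * x_t - sqrt_omac * v.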
11
2023-12-12 00:49:11+00:00
16k
nox-410/tvm.tl
python/tvm/topi/arm_cpu/conv2d_gemm.py
[ { "identifier": "get_const_tuple", "path": "python/tvm/topi/utils.py", "snippet": "def get_const_tuple(in_tuple):\n \"\"\"Verifies input tuple is IntImm or Var, returns tuple of int or Var.\n\n Parameters\n ----------\n in_tuple : tuple of Expr\n The input.\n\n Returns\n -------\n out_tuple : tuple of int\n The output.\n \"\"\"\n ret = []\n ana = None\n for elem in in_tuple:\n if isinstance(elem, (tvm.tir.Var, tvm.tir.expr.Any)):\n ret.append(elem)\n elif not isinstance(elem, (tvm.tir.IntImm, int)):\n ana = tvm.arith.Analyzer() if ana is None else ana\n elem = ana.simplify(elem)\n if not isinstance(elem, tvm.tir.IntImm):\n ret.append(elem)\n else:\n ret.append(get_const_int(elem))\n else:\n ret.append(get_const_int(elem))\n return tuple(ret)" }, { "identifier": "get_const_int", "path": "python/tvm/topi/utils.py", "snippet": "def get_const_int(expr):\n \"\"\"Verifies expr is integer and get the constant value.\n\n Parameters\n ----------\n expr : tvm.Expr or int\n The input expression.\n\n Returns\n -------\n out_value : int\n The output.\n \"\"\"\n if isinstance(expr, Integral):\n return expr\n if not isinstance(expr, tvm.tir.IntImm):\n ana = tvm.arith.Analyzer()\n expr = ana.simplify(expr)\n if not isinstance(expr, tvm.tir.IntImm):\n raise ValueError(\"Expect value to be constant int\")\n return int(expr.value)" }, { "identifier": "get_pad_tuple", "path": "python/tvm/topi/nn/utils.py", "snippet": "def get_pad_tuple(padding, kernel):\n \"\"\"Common code to get the pad option\n\n Parameters\n ----------\n padding : int or str\n Padding size, or ['VALID', 'SAME']\n\n kernel : tuple of int\n Conv kernel size\n\n Returns\n -------\n pad_top : int\n Padding size on top\n\n pad_left : int\n Padding size on left\n\n pad_down : int\n Padding size on down.\n\n pad_right : int\n Padding size on right.\n \"\"\"\n # compute the padding size\n if isinstance(padding, (tuple, list)):\n if len(padding) == 2:\n pad_h = padding[0] * 2\n pad_w = padding[1] * 2\n elif len(padding) == 4:\n return padding[0], padding[1], padding[2], padding[3]\n else:\n raise ValueError(\"Size of padding can only be 2 or 4\")\n elif isinstance(padding, int):\n pad_h = pad_w = padding * 2\n elif padding == \"VALID\":\n pad_h = 0\n pad_w = 0\n elif padding == \"SAME\":\n pad_h = kernel[0] - 1\n pad_w = kernel[1] - 1\n else:\n raise ValueError(f\"Unknown padding option {padding}\")\n pad_top = (pad_h + 1) // 2\n pad_left = (pad_w + 1) // 2\n return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left" }, { "identifier": "gemm_4x4_int8_int8_int32", "path": "python/tvm/topi/arm_cpu/tensor_intrin.py", "snippet": "def gemm_4x4_int8_int8_int32(M, N, K, unroll, in_type):\n \"\"\"\n Int8 4x4 matrix multiplication and accumulation using a sequence of\n umull -> uadalp -> umull2 -> uadalp instructions. This function\n takes two arrays of int8 data type A[4][K] and B[4][K], and produces\n a 4x4 matrix which is equal to A*B'.\n\n The pseudo code is as follows.\n\n .. 
code-block:: c\n\n void gemm_4x4_int8_int8_int32(int8 A[4][K], int8 B[4][K], int32 C[4][4]){\n for (int i = 0; i < 4; i++){\n for (int j = 0; j < 4; j++){\n for (int k = 0; k < K; k++){\n C[i][j] += A[i][k] * B[j][k]\n }\n }\n }\n\n Notes:\n * The tiling strategy is picked to maximize register usage.\n\n Parameters\n ----------\n M : int\n rows of the matrix A\n N : int\n columns of the matrix B\n K : int\n columns of matrix A\n unroll : bool\n Unroll the loop accumulation if True\n in_type : str, {'uint8', 'int8'}\n\n Returns\n -------\n intrin : TensorIntrin\n The ARM uint8/int8 TensorIntrin that can be used in tensorizing schedule\n \"\"\"\n assert in_type in [\"uint8\", \"int8\"]\n A = te.placeholder((K // 16, te.var(\"m\"), 16), dtype=in_type, name=\"A\")\n B = te.placeholder((K // 16, te.var(\"n\"), 16), dtype=in_type, name=\"B\")\n dtype_vec = in_type + \"x16\"\n idxm = tvm.tir.indexmod\n\n k = te.reduce_axis((0, K), \"k\")\n C = te.compute(\n (te.var(\"m\"), te.var(\"n\")),\n lambda x, y: te.sum(\n A[k // 16, x, idxm(k, 16)].astype(\"int32\") * B[k // 16, y, idxm(k, 16)].astype(\"int32\"),\n axis=k,\n ),\n name=\"C\",\n )\n\n a_buffer = tvm.tir.decl_buffer(\n A.shape,\n dtype=in_type,\n name=\"a_buffer\",\n offset_factor=1,\n strides=[te.var(\"sa_1\"), te.var(\"sa_2\"), 1],\n )\n\n b_buffer = tvm.tir.decl_buffer(\n B.shape,\n dtype=in_type,\n name=\"b_buffer\",\n offset_factor=1,\n strides=[te.var(\"sb_1\"), te.var(\"sb_2\"), 1],\n )\n\n c_buffer = tvm.tir.decl_buffer(\n C.shape, dtype=\"int32\", name=\"c_buffer\", offset_factor=1, strides=[te.var(\"sc\"), 1]\n )\n\n # Intrinsics used in the following algorithm\n umull_intrin = \"llvm.aarch64.neon.umull\" if in_type == \"uint8\" else \"llvm.aarch64.neon.smull\"\n uaddlp_intrin = \"llvm.aarch64.neon.uaddlp\" if in_type == \"uint8\" else \"llvm.aarch64.neon.saddlp\"\n addp_intrin = \"llvm.aarch64.neon.addp\"\n\n def uadalp(a, b):\n \"\"\"Add pair and accumulate\n\n Parameters:\n ----------\n a: int16x8 vector\n b: int16x8 vector\n\n Returns:\n --------\n return a int32x4 vector\n\n Pseudocode:\n ----------\n a += (b0+b1, b2+b3, b4+b5, b6+b7)\n \"\"\"\n\n return a + tvm.tir.call_llvm_pure_intrin(\n \"int32x4\", uaddlp_intrin, tvm.tir.const(1, \"uint32\"), b\n )\n\n def umull(a, b):\n \"\"\"Multiply long (higher part)\n\n Parameters:\n ----------\n a: int8x16 vector\n b: int8x16 vector\n\n Returns:\n --------\n return a int16x8 vector\n\n Pseudocode:\n ----------\n c = (a0*b0, a1*b1, a2*b2, a3*b3, a4*b4, a5*b5, a6*b6, a7*b7)\n \"\"\"\n a_high = tvm.tir.call_intrin(\"int8x8\", \"tir.vectorhigh\", a)\n b_high = tvm.tir.call_intrin(\"int8x8\", \"tir.vectorhigh\", b)\n c = tvm.tir.call_llvm_pure_intrin(\n \"int16x8\", umull_intrin, tvm.tir.const(2, \"uint32\"), a_high, b_high\n )\n return c\n\n def umull2(a, b):\n \"\"\"Multiply long (lower part)\n\n Parameters:\n ----------\n a: int8x16 vector\n b: int8x16 vector\n\n Returns:\n --------\n return a int16x8 vector\n\n Pseudocode:\n ----------\n c = (a8*b8, a9*b9, a10*b10, a11*b11, a12*b12, a13*b13, a14*b14, a15*b15)\n \"\"\"\n a_low = tvm.tir.call_intrin(\"int8x8\", \"tir.vectorlow\", a)\n b_low = tvm.tir.call_intrin(\"int8x8\", \"tir.vectorlow\", b)\n c = tvm.tir.call_llvm_pure_intrin(\n \"int16x8\", umull_intrin, tvm.tir.const(2, \"uint32\"), a_low, b_low\n )\n return c\n\n def addp(a, b):\n \"\"\"Add two vectors in pairs\n\n Parameters:\n ----------\n a: int32x4 vector\n b: int32x4 vector\n\n Returns:\n --------\n return a int32x4 vector\n\n Pseudocode:\n ----------\n c = (a0+a1, a2+a3, 
b0+b1, b0+b3)\n \"\"\"\n return tvm.tir.call_llvm_pure_intrin(\n \"int32x4\", addp_intrin, tvm.tir.const(2, \"uint32\"), a, b\n )\n\n def accumulation_loop(M, N, ins, acc, tile_idx):\n \"\"\"Internal tile accumulation. This function\n takes two arrays of int8 data type A[tile_idx][4][16] and B[tile_idx][4][16], produces\n a 4x4 matrix which is equal to A*B' and accumulates into C[4][4]\n\n The pseudo code is as follows.\n\n .. code-block:: c\n\n void gemm_4x4_int8_int8_int32(int8 A[tile_idx][4][K],\n int8 B[tile_idx][4][K],\n int32 C[4][4]){\n for (int i = 0; i < 4; i++){\n for (int j = 0; j < 4; j++){\n for (int k = 0; k < 16; k++){\n C[i][j] += A[tile_idx][i][k] * B[tile_idx][j][k]\n }\n }\n }\n\n Notes:\n * The tiling strategy is picked to maximize register usage.\n\n Parameters:\n ----------\n M : int\n Number of total rows of the output matrix\n N : int\n Number of total columns of the output matrix\n ins : list of tvm.tir.buffer\n Input buffers\n acc : tvm.tir.ir_builder.BufferVar\n Bank of register accumulators\n tiled_idx : int\n Index of a sub-tile of A and B in A[tile_idx][:][:] and B[tile_idx][:][:].\n Please note that 0 <= tile_idx <= K//16\n\n \"\"\"\n a0 = ins[0].vload([tile_idx, 0, 0], dtype_vec)\n a1 = tvm.tir.const(0, \"int8x16\")\n if M > 1:\n a1 = ins[0].vload([tile_idx, 1, 0], dtype_vec)\n a2 = tvm.tir.const(0, \"int8x16\")\n if M > 2:\n a2 = ins[0].vload([tile_idx, 2, 0], dtype_vec)\n a3 = tvm.tir.const(0, \"int8x16\")\n if M > 3:\n a3 = ins[0].vload([tile_idx, 3, 0], dtype_vec)\n\n b0 = ins[1].vload([tile_idx, 0, 0], dtype_vec)\n b1 = tvm.tir.const(0, \"int8x16\")\n if N > 1:\n b1 = ins[1].vload([tile_idx, 1, 0], dtype_vec)\n b2 = tvm.tir.const(0, \"int8x16\")\n if N > 2:\n b2 = ins[1].vload([tile_idx, 2, 0], dtype_vec)\n b3 = tvm.tir.const(0, \"int8x16\")\n if N > 3:\n b3 = ins[1].vload([tile_idx, 3, 0], dtype_vec)\n\n # First half\n # Lower part of a0 * {b0,b1,b2,b3}\n d00 = umull(a0, b0)\n d01 = umull(a0, b1)\n d02 = umull(a0, b2)\n d03 = umull(a0, b3)\n\n # Lower part of a1 * {b0,b1,b2,b3}\n d10 = umull(a1, b0)\n d11 = umull(a1, b1)\n d12 = umull(a1, b2)\n d13 = umull(a1, b3)\n\n # Accumulate\n acc[0] = uadalp(acc[0], d00)\n acc[1] = uadalp(acc[1], d01)\n acc[2] = uadalp(acc[2], d02)\n acc[3] = uadalp(acc[3], d03)\n acc[4] = uadalp(acc[4], d10)\n acc[5] = uadalp(acc[5], d11)\n acc[6] = uadalp(acc[6], d12)\n acc[7] = uadalp(acc[7], d13)\n\n # Higher part of a0 * {b0,b1,b2,b3}\n d00 = umull2(a0, b0)\n d01 = umull2(a0, b1)\n d02 = umull2(a0, b2)\n d03 = umull2(a0, b3)\n\n # Higher part of a1 * {b0,b1,b2,b3}\n d10 = umull2(a1, b0)\n d11 = umull2(a1, b1)\n d12 = umull2(a1, b2)\n d13 = umull2(a1, b3)\n\n # Accumulate again\n acc[0] = uadalp(acc[0], d00)\n acc[1] = uadalp(acc[1], d01)\n acc[2] = uadalp(acc[2], d02)\n acc[3] = uadalp(acc[3], d03)\n acc[4] = uadalp(acc[4], d10)\n acc[5] = uadalp(acc[5], d11)\n acc[6] = uadalp(acc[6], d12)\n acc[7] = uadalp(acc[7], d13)\n\n # Second half\n # Lower part of a2 * {b0,b1,b2,b3}\n d00 = umull(a2, b0)\n d01 = umull(a2, b1)\n d02 = umull(a2, b2)\n d03 = umull(a2, b3)\n\n # Lower part of a3 * {b0,b1,b2,b3}\n d10 = umull(a3, b0)\n d11 = umull(a3, b1)\n d12 = umull(a3, b2)\n d13 = umull(a3, b3)\n\n # Accumulate\n acc[8] = uadalp(acc[8], d00)\n acc[9] = uadalp(acc[9], d01)\n acc[10] = uadalp(acc[10], d02)\n acc[11] = uadalp(acc[11], d03)\n acc[12] = uadalp(acc[12], d10)\n acc[13] = uadalp(acc[13], d11)\n acc[14] = uadalp(acc[14], d12)\n acc[15] = uadalp(acc[15], d13)\n\n # Higher part of a2 * {b0,b1,b2,b3}\n d00 = umull2(a2, b0)\n d01 
= umull2(a2, b1)\n d02 = umull2(a2, b2)\n d03 = umull2(a2, b3)\n\n # Lower part of a3 * {b0,b1,b2,b3}\n d10 = umull2(a3, b0)\n d11 = umull2(a3, b1)\n d12 = umull2(a3, b2)\n d13 = umull2(a3, b3)\n\n # Accumulate\n acc[8] = uadalp(acc[8], d00)\n acc[9] = uadalp(acc[9], d01)\n acc[10] = uadalp(acc[10], d02)\n acc[11] = uadalp(acc[11], d03)\n acc[12] = uadalp(acc[12], d10)\n acc[13] = uadalp(acc[13], d11)\n acc[14] = uadalp(acc[14], d12)\n acc[15] = uadalp(acc[15], d13)\n\n def _intrin_func(ins, outs):\n def _instr():\n ib = tvm.tir.ir_builder.create()\n # Allocate a local buffer (possibly translates to registers)\n acc = ib.allocate(\"int32x4\", 16, name=\"accs\", scope=\"local\")\n m = outs[0].shape[0]\n n = outs[0].shape[1]\n # Initialization\n for i in range(0, 16):\n acc[i] = tvm.tir.const(0, \"int32x4\")\n\n if unroll:\n for i in range(0, int(K // 16)):\n accumulation_loop(M, N, ins, acc, i)\n else:\n with ib.for_range(0, K // 16, name=\"i\") as i:\n accumulation_loop(M, N, ins, acc, i)\n\n # Final accumulations\n # acc[4*r + c] contains the partial accumulations of element C[r][c]\n #\n # In particular:\n # acc[4*r] contains the partial sums of a[r,0:K].*b[0,0:K] -> (a,b,c,d)\n # acc[4*r+1] contains the partial sums of a[r, 0:K].*b[1,0:K] -> (e,f,g,h)\n # acc[4*r+2] contains the partial sums of a[r, 0:K].*b[2,0:K] -> (i,j,k,l)\n # acc[4*r+3] contains the partial sums of a[r, 0:K].*b[3,0:K] -> (m,n,o,p)\n #\n # Please note that 0<= r, c < 4\n\n acc[0] = addp(acc[0], acc[1]) # (a+b, c+d, e+f, g+h)\n acc[1] = addp(acc[2], acc[3]) # (i+j, k+l, m+n, o+p)\n acc[0] = addp(acc[0], acc[1]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)\n\n acc[4] = addp(acc[4], acc[5]) # (a+b, c+d, e+f, g+h)\n acc[5] = addp(acc[6], acc[7]) # (i+j, k+l, m+n, o+p)\n acc[4] = addp(acc[4], acc[5]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)\n\n acc[8] = addp(acc[8], acc[9]) # (a+b, c+d, e+f, g+h)\n acc[9] = addp(acc[10], acc[11]) # (i+j, k+l, m+n, o+p)\n acc[8] = addp(acc[8], acc[9]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)\n\n acc[12] = addp(acc[12], acc[13]) # (a+b, c+d, e+f, g+h)\n acc[13] = addp(acc[14], acc[15]) # (i+j, k+l, m+n, o+p)\n acc[12] = addp(acc[12], acc[13]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)\n\n # Store the result\n if N > 3:\n out_0 = acc[0]\n out_1 = acc[4]\n out_2 = acc[8]\n out_3 = acc[12]\n elif N > 2:\n out_0 = tvm.tir.call_intrin(\"int32x3\", \"tir.reinterpret\", acc[0])\n out_1 = tvm.tir.call_intrin(\"int32x3\", \"tir.reinterpret\", acc[4])\n out_2 = tvm.tir.call_intrin(\"int32x3\", \"tir.reinterpret\", acc[8])\n out_3 = tvm.tir.call_intrin(\"int32x3\", \"tir.reinterpret\", acc[12])\n elif N > 1:\n out_0 = tvm.tir.call_intrin(\"int32x2\", \"tir.reinterpret\", acc[0])\n out_1 = tvm.tir.call_intrin(\"int32x2\", \"tir.reinterpret\", acc[4])\n out_2 = tvm.tir.call_intrin(\"int32x2\", \"tir.reinterpret\", acc[8])\n out_3 = tvm.tir.call_intrin(\"int32x2\", \"tir.reinterpret\", acc[12])\n else:\n out_0 = tvm.tir.call_intrin(\"int32\", \"tir.reinterpret\", acc[0])\n out_1 = tvm.tir.call_intrin(\"int32\", \"tir.reinterpret\", acc[4])\n out_2 = tvm.tir.call_intrin(\"int32\", \"tir.reinterpret\", acc[8])\n out_3 = tvm.tir.call_intrin(\"int32\", \"tir.reinterpret\", acc[12])\n\n ib.emit(outs[0].vstore([0, 0], out_0))\n if M > 1:\n ib.emit(outs[0].vstore([1, 0], out_1))\n if M > 2:\n ib.emit(outs[0].vstore([2, 0], out_2))\n if M > 3:\n ib.emit(outs[0].vstore([3, 0], out_3))\n return ib.get()\n\n # body, reset, update\n return _instr()\n\n buffer_params = {\"offset_factor\": 1}\n return te.decl_tensor_intrin(\n 
C.op,\n _intrin_func,\n binds={A: a_buffer, B: b_buffer, C: c_buffer},\n default_buffer_params=buffer_params,\n )" }, { "identifier": "gemm_acc_4x4_int8_int8_int32", "path": "python/tvm/topi/arm_cpu/tensor_intrin.py", "snippet": "def gemm_acc_4x4_int8_int8_int32(dtype):\n \"\"\"\n Int8 4x4 matrix multiplication and accumulation using sdot/udot\n instructions. This function takes two arrays of int8 datatype\n -- A[4][4] and B[4][4] and produces a 4x4 matrix\n which is equal to A*B'.\n\n The pseudo code is as follows.\n\n .. code-block:: c\n\n void gemm_acc_4x4_int8_int8_int32(int8 A[4][4], int8 B[4][4], int32 C[4][4]){\n for (int i = 0; i < 4; i++){\n for (int j = 0; j < 4; j++){\n for (int k = 0; k < 4; k++){\n C[i][j] += A[i][k] * B[j][k]\n }\n }\n }\n\n Notes:\n * The tiling strategy is picked to maximize register usage.\n\n Parameters\n ----------\n dtype : str, {\"uint8\", \"int8\"}\n Whether it works on unsigned int or signed int\n\n Returns\n -------\n intrin : TensorIntrin\n The Arm TensorIntrin that can be used in tensorizing schedule\n \"\"\"\n assert dtype in [\"uint8\", \"int8\"]\n # This needs to be a variable number of \"rows\" since TVM\n # \"thinks\" I only need to compute one row because of\n # padding\n A = te.placeholder((te.var(\"rows\"), 4), dtype, name=\"A\")\n B = te.placeholder((4, 4), dtype, name=\"B\")\n dtype_vec = dtype + \"x16\"\n\n k = te.reduce_axis((0, 4), name=\"k\")\n C = te.compute(\n (te.var(\"rows\"), 4),\n lambda i, j: te.sum(A[i, k].astype(\"int32\") * B[j, k].astype(\"int32\"), axis=k),\n name=\"C\",\n )\n\n aa_buffer = tvm.tir.decl_buffer(\n A.shape, dtype, name=\"aa_buffer\", offset_factor=1, strides=[te.var(\"sa\"), 1]\n )\n bb_buffer = tvm.tir.decl_buffer(\n B.shape, dtype, name=\"bb_buffer\", offset_factor=1, strides=[te.var(\"sb\"), 1]\n )\n cc_buffer = tvm.tir.decl_buffer(\n C.shape, dtype=\"int32\", name=\"cc_buffer\", offset_factor=1, strides=[te.var(\"sc\"), 1]\n )\n\n llvm_intrin = \"llvm.aarch64.neon.sdot\" if dtype == \"int8\" else \"llvm.aarch64.neon.udot\"\n\n def _intrin_func(ins, outs):\n def _instr(index):\n ib = tvm.tir.ir_builder.create()\n if index == 1:\n for i in range(0, 4):\n ib.emit(outs[0].vstore([i, 0], tvm.tir.const(0, \"int32x4\")))\n return ib.get()\n # Load all the elements of tile A.\n # vec_a = [a, b, c, d,\n # e, f, g, h,\n # l, m, n, o,\n # p, q, r, s];\n vec_a = ins[0].vload([0, 0], dtype_vec)\n\n # Replicate 4 times the i-th row of A. For instance,\n # vec_a[0] = [a, b, c, d,\n # a, b, c, d,\n # a, b, c, d,\n # a, b, c, d,];\n vec_aa = [select_word(vec_a, i, dtype_vec) for i in range(0, 4)]\n\n # Load all the elements of B. Remember that B\n # is transposed:\n # vec_b = [0, 4, 8, 12,\n # 1, 5, 9, 13,\n # 2, 6, 10, 14,\n # 3, 7, 11, 15,];\n vec_b = ins[1].vload([0, 0], dtype_vec)\n\n # Execute the dot product\n for i in range(0, 4):\n vec_c = outs[0].vload([i, 0], \"int32x4\")\n # Compute the product between the i-th row of A\n # and all the rows of B. 
Remember that sdot/udot\n # subdive the input vectors in 16 elements\n # and then take the dot product among each group.\n # The result is stored in a int32x4 register\n #\n # For instance, for i=0, we have:\n # sdot(vec_aa[0], vec_b) = [a*0+b*4+c*8+d*12,\n # a*1+b*5+c*9+d*13,\n # a*2+b*6+c*10+d*14,\n # a*3+b*7+c*11+d*15]\n vdot = tvm.tir.call_llvm_intrin(\n \"int32x4\", llvm_intrin, tvm.tir.const(3, \"uint32\"), vec_c, vec_b, vec_aa[i]\n )\n\n # Store the result\n ib.emit(outs[0].vstore([i, 0], vdot))\n\n return ib.get()\n\n # body, reset, update\n return _instr(0), _instr(1), _instr(2)\n\n buffer_params = {\"offset_factor\": 1}\n return te.decl_tensor_intrin(\n C.op,\n _intrin_func,\n binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},\n default_buffer_params=buffer_params,\n )" }, { "identifier": "gemm_acc_nx16_int8_int8_int32", "path": "python/tvm/topi/arm_cpu/tensor_intrin.py", "snippet": "def gemm_acc_nx16_int8_int8_int32(dtype, rows):\n \"\"\"\n Int8 nx16 matrix multiplication and accumulation using sdot/udot instructions\n This function takes two arrays of int8 datatype -- A[n][4] and\n B[4][16] and produces a rowsx16 matrix which is equal to A*B'\n The pseudo code is as follows.\n\n .. code-block:: c\n\n void mmla_nx16_int8_int8_int32(int8 A[n][16], int8 B[4][16][4], int32 output[n][16]){\n for (int i = 0; i < n; i++){\n for (int j = 0; j < 16; j++){\n for (int k = 0; k < 16; k++){\n out[i][j] += A[i][k] * B[k//4][j][k%4]\n }\n }\n }\n }\n\n Notes:\n * The tile size of B is 16x4. Since the reduction variable k moves between 0 and 16\n we need 4 tiles of B to compute a single row of the output. The first 4 values of\n k will be fetched from B[0][j][k], the second batch of 4 from B[1][j][k] and so on\n * The tiling strategy is picked to maximize register usage.\n\n Parameters\n ----------\n dtype : str, {\"uint8\", \"int8\"}\n Whether it works on unsigned int or signed int\n rows : int\n Number of the output rows \"n\"\n\n Returns\n -------\n intrin : TensorIntrin\n The Arm TensorIntrin that can be used in tensorizing schedule\n \"\"\"\n assert dtype in [\"uint8\", \"int8\"]\n A = te.placeholder((rows, 16), dtype, name=\"A\")\n B = te.placeholder((4, 16, 4), dtype, name=\"B\")\n dtype_vec = dtype + \"x16\"\n idxm = tvm.tir.indexmod\n k = te.reduce_axis((0, 16), name=\"k\")\n C = te.compute(\n (rows, 16),\n lambda i, j: te.sum(\n A[i, k].astype(\"int32\") * B[k // 4, j, idxm(k, 4)].astype(\"int32\"), axis=k\n ),\n name=\"C\",\n )\n\n aa_buffer = tvm.tir.decl_buffer(\n A.shape, dtype, name=\"aa_buffer\", offset_factor=1, strides=[te.var(\"sa\"), 1]\n )\n bb_buffer = tvm.tir.decl_buffer(\n B.shape, dtype, name=\"bb_buffer\", offset_factor=1, strides=[te.var(\"sb0\"), te.var(\"sb1\"), 1]\n )\n cc_buffer = tvm.tir.decl_buffer(\n C.shape, dtype=\"int32\", name=\"cc_buffer\", offset_factor=1, strides=[te.var(\"sc\"), 1]\n )\n\n llvm_intrin = \"llvm.aarch64.neon.sdot\" if dtype == \"int8\" else \"llvm.aarch64.neon.udot\"\n\n def _intrin_func(ins, outs):\n def _instr(index):\n ib = tvm.tir.ir_builder.create()\n if index == 1:\n for i in range(0, rows):\n ib.emit(outs[0].vstore([i, 0], tvm.tir.const(0, \"int32x16\")))\n return ib.get()\n # Iterate on the number of rows of the output\n for k in range(0, rows):\n # Load 16 elements of A\n # vec_a = [a, b, c, d, e, f, g, h, l, m, n, o, p, q, r, s];\n vec_a = ins[0].vload([k, 0], dtype_vec)\n\n # Iterate over each of the 4 rowsx4 tiles of the output\n for j in range(0, 4):\n # Accumulate over each of the 4 (16x4) tiles contained in B\n for i in 
range(0, 4):\n # Replicate a single 4-element group of A (A[k, i:i+4])\n vec_aa = select_word(vec_a, i, dtype_vec)\n\n # Load 4 rows (each rows with 4 elements) from B (B[i:i+4, j:j+4])\n # vec_b = [0, 16, 32, 48,\n # 1, 17, 33, 49,\n # 2, 18, 34, 50,\n # 3, 19, 35, 51,];\n vec_b = ins[1].vload([i, 4 * j, 0], dtype_vec)\n\n # Accumulate in the correct part of the output\n vec_c = outs[0].vload([k, 4 * j], \"int32x4\")\n\n # Compute the dot product between the rowsx4 tile\n # from A and the 4x4 tile from B\n #\n # For instance, for i=0, we have:\n # sdot(vec_aa[0], vec_b) = [a*0+b*16+c*32+d*48,\n # a*1+b*17+c*33+d*49,\n # a*2+b*18+c*34+d*50,\n # a*3+b*19+c*35+d*51]\n vdot = tvm.tir.call_llvm_intrin(\n \"int32x4\", llvm_intrin, tvm.tir.const(3, \"uint32\"), vec_c, vec_b, vec_aa\n )\n ib.emit(outs[0].vstore([k, 4 * j], vdot))\n return ib.get()\n\n # body, reset, update\n return _instr(0), _instr(1), _instr(2)\n\n buffer_params = {\"offset_factor\": 1}\n return te.decl_tensor_intrin(\n C.op,\n _intrin_func,\n binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},\n default_buffer_params=buffer_params,\n )" }, { "identifier": "gemm_acc_2x2_int8_int8_int32", "path": "python/tvm/topi/arm_cpu/tensor_intrin.py", "snippet": "def gemm_acc_2x2_int8_int8_int32(dtype):\n \"\"\"\n Int8 2x2 matrix multiplication using smmla/ummla instructions\n This function takes two arrays of int8 datatype -- A[2][8] and\n B[2][8] and produces a 2x2 matrix which is equal to A*B'\n The pseudo code is as follows.\n\n .. code-block:: c\n\n void mmla_2x2_int8_int8_int32(int8 A[2][8], int8 B[2][8], int32 C[2][2]){\n for (int i = 0; i < 2; i++){\n for (int j = 0; j < 2; j++){\n for (int k = 0; k < 8; k++){\n C[i][j] += A[i][k] * B[j][k]\n }\n }\n }\n\n Parameters\n ----------\n dtype : str, {\"uint8\", \"int8\"}\n Whether it works on unsigned int or signed int\n\n Returns\n -------\n intrin : TensorIntrin\n The Arm TensorIntrin that can be used in tensorizing schedule\n \"\"\"\n assert dtype in [\"uint8\", \"int8\"]\n A = te.placeholder((2, 8), dtype, name=\"A\")\n B = te.placeholder((2, 8), dtype, name=\"B\")\n dtype_vec = dtype + \"x16\"\n\n k = te.reduce_axis((0, 8), name=\"k\")\n C = te.compute(\n (2, 2),\n lambda i, j: te.sum(A[i, k].astype(\"int32\") * B[j, k].astype(\"int32\"), axis=k),\n name=\"C\",\n )\n\n aa_buffer = tvm.tir.decl_buffer(\n A.shape, dtype, name=\"aa_buffer\", offset_factor=1, strides=[te.var(\"sa\"), 1]\n )\n bb_buffer = tvm.tir.decl_buffer(\n B.shape, dtype, name=\"bb_buffer\", offset_factor=1, strides=[te.var(\"sb\"), 1]\n )\n cc_buffer = tvm.tir.decl_buffer(\n C.shape, dtype=\"int32\", name=\"cc_buffer\", offset_factor=1, strides=[te.var(\"sc\"), 1]\n )\n\n llvm_intrin = \"llvm.aarch64.neon.smmla\" if dtype == \"int8\" else \"llvm.aarch64.neon.ummla\"\n\n def _intrin_func(ins, outs):\n def _instr(index):\n ib = tvm.tir.ir_builder.create()\n if index == 1:\n ib.emit(outs[0].vstore([0, 0], tvm.tir.const(0, \"int32x4\")))\n return ib.get()\n # Load in vec_a the two rows of A\n # vec_a = [a, b, c, d, e, f, g, h;\n # i, j, k, l, m, n, o, p,]\n vec_a = ins[0].vload([0, 0], dtype_vec)\n # Load in vec_b the two rows of B\n # vec_b = [0, 2, 4, 6, 8, 10, 12, 14;\n # 1, 3, 5, 7, 9, 11, 13, 14,]\n vec_b = ins[1].vload([0, 0], dtype_vec)\n\n # Execute the matrix multiplication via (s/u)mmla:\n # vec_c = [a*0 + b*2 + c*4 + d*6 +e*8 + f*10 + g*12 + h*14;\n # a*1 + b*3 + c*5 + d*7 +e*9 + f*11 + g*13 + h*15;\n # i*0 + j*2 + k*4 + l*6 +m*8 + n*10 + o*12 + p*14;\n # i*1 + j*3 + k*5 + l*7 +m*9 + n*11 + o*13 + p*15]\n 
vec_c = outs[0].vload([0, 0], \"int32x4\")\n vmmla = tvm.tir.call_llvm_intrin(\n \"int32x4\", llvm_intrin, tvm.tir.const(3, \"uint32\"), vec_c, vec_a, vec_b\n )\n # Store the result\n ib.emit(outs[0].vstore([0, 0], vmmla))\n return ib.get()\n\n # body, reset, update\n return _instr(0), _instr(1), _instr(2)\n\n buffer_params = {\"offset_factor\": 1}\n return te.decl_tensor_intrin(\n C.op,\n _intrin_func,\n binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},\n default_buffer_params=buffer_params,\n )" } ]
import tvm
from tvm.target import Target
from tvm import te
from tvm.topi import nn
from tvm.autotvm.task.space import AnnotateEntity, ReorderEntity, OtherOptionEntity
from ..utils import get_const_tuple, get_const_int
from ..nn.utils import get_pad_tuple
from .tensor_intrin import (
    gemm_4x4_int8_int8_int32,
    gemm_acc_4x4_int8_int8_int32,
    gemm_acc_nx16_int8_int8_int32,
    gemm_acc_2x2_int8_int8_int32,
)
10,903
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals
# pylint: disable=unused-argument, redefined-builtin
"""GEMM Convolution schedule on ARM"""


def configure_knobs(cfg, M, K, target):
    """Configure auto-tuning knobs for the interleaved strategy"""
    x, y = cfg.axis(M // 4), cfg.axis(K // 16)
    cfg.define_reorder("reorder_gemm", [x, y], policy="candidate", candidate=[[x, y], [y, x]])

    outer_loop, inner_loop = cfg.axis(4), cfg.axis(16)
    cfg.define_annotate(
        "A_interleaved_unroll_vec", [outer_loop, inner_loop], policy="try_unroll_vec"
    )

    # Fallback configuration
    if cfg.is_fallback:
        cfg["reorder_gemm"] = ReorderEntity([0, 1])
        cfg["A_interleaved_unroll_vec"] = AnnotateEntity(["unroll", "vec"])

    if not target.features.has_dotprod:
        cfg.define_knob("gemm_quantized_unroll", [True, False])
        if cfg.is_fallback:
            cfg["gemm_quantized_unroll"] = OtherOptionEntity(False)


# Compute function
def compute_conv2d_gemm_without_weight_transform(
    cfg,
    data,
    B_interleaved_t,
    strides,
    padding,
    dilation,
    out_dtype,
    kernel_size,
    output_channels,
    interleave_A,
):
    """Compute conv2d by transforming the input, executing GEMM and transforming the output back"""
    batches, IH, IW, IC = get_const_tuple(data.shape)

    KH, KW = get_const_tuple(kernel_size)
    OC = get_const_int(output_channels)
    kernel_area = KH * KW

    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = get_const_tuple(dilation)

    dilated_kernel_h = (KH - 1) * dilation_h + 1
    dilated_kernel_w = (KW - 1) * dilation_w + 1
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals
# pylint: disable=unused-argument, redefined-builtin
"""GEMM Convolution schedule on ARM"""


def configure_knobs(cfg, M, K, target):
    """Configure auto-tuning knobs for the interleaved strategy"""
    x, y = cfg.axis(M // 4), cfg.axis(K // 16)
    cfg.define_reorder("reorder_gemm", [x, y], policy="candidate", candidate=[[x, y], [y, x]])

    outer_loop, inner_loop = cfg.axis(4), cfg.axis(16)
    cfg.define_annotate(
        "A_interleaved_unroll_vec", [outer_loop, inner_loop], policy="try_unroll_vec"
    )

    # Fallback configuration
    if cfg.is_fallback:
        cfg["reorder_gemm"] = ReorderEntity([0, 1])
        cfg["A_interleaved_unroll_vec"] = AnnotateEntity(["unroll", "vec"])

    if not target.features.has_dotprod:
        cfg.define_knob("gemm_quantized_unroll", [True, False])
        if cfg.is_fallback:
            cfg["gemm_quantized_unroll"] = OtherOptionEntity(False)


# Compute function
def compute_conv2d_gemm_without_weight_transform(
    cfg,
    data,
    B_interleaved_t,
    strides,
    padding,
    dilation,
    out_dtype,
    kernel_size,
    output_channels,
    interleave_A,
):
    """Compute conv2d by transforming the input, executing GEMM and transforming the output back"""
    batches, IH, IW, IC = get_const_tuple(data.shape)

    KH, KW = get_const_tuple(kernel_size)
    OC = get_const_int(output_channels)
    kernel_area = KH * KW

    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = get_const_tuple(dilation)

    dilated_kernel_h = (KH - 1) * dilation_h + 1
    dilated_kernel_w = (KW - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
2
2023-12-14 02:37:47+00:00
16k
yolain/ComfyUI-Easy-Use
py/easyNodes.py
[ { "identifier": "advanced_encode", "path": "py/adv_encode.py", "snippet": "def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5,\n apply_to_pooled=True):\n tokenized = clip.tokenize(text, return_word_ids=True)\n if isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)):\n embs_l = None\n embs_g = None\n pooled = None\n if 'l' in tokenized and isinstance(clip.cond_stage_model, SDXLClipModel):\n embs_l, _ = advanced_encode_from_tokens(tokenized['l'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_l),\n w_max=w_max,\n return_pooled=False)\n if 'g' in tokenized:\n embs_g, pooled = advanced_encode_from_tokens(tokenized['g'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x,\n encode_token_weights_g),\n w_max=w_max,\n return_pooled=True,\n apply_to_pooled=apply_to_pooled)\n return prepareXL(embs_l, embs_g, pooled, clip_balance)\n else:\n return advanced_encode_from_tokens(tokenized['l'],\n token_normalization,\n weight_interpretation,\n lambda x: (clip.encode_from_tokens({'l': x}), None),\n w_max=w_max)" }, { "identifier": "advanced_encode_XL", "path": "py/adv_encode.py", "snippet": "def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5,\n apply_to_pooled=True):\n tokenized1 = clip.tokenize(text1, return_word_ids=True)\n tokenized2 = clip.tokenize(text2, return_word_ids=True)\n\n embs_l, _ = advanced_encode_from_tokens(tokenized1['l'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_l),\n w_max=w_max,\n return_pooled=False)\n\n embs_g, pooled = advanced_encode_from_tokens(tokenized2['g'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_g),\n w_max=w_max,\n return_pooled=True,\n apply_to_pooled=apply_to_pooled)\n\n gcd_num = gcd(embs_l.shape[1], embs_g.shape[1])\n repeat_l = int((embs_g.shape[1] / gcd_num) * embs_l.shape[1])\n repeat_g = int((embs_l.shape[1] / gcd_num) * embs_g.shape[1])\n\n return prepareXL(embs_l.expand((-1, repeat_l, -1)), embs_g.expand((-1, repeat_g, -1)), pooled, clip_balance)" }, { "identifier": "BASE_RESOLUTIONS", "path": "py/config.py", "snippet": "BASE_RESOLUTIONS = [\n (\"自定义\", \"自定义\"),\n (512, 512),\n (512, 768),\n (768, 512),\n (576, 1024),\n (768, 1024),\n (768, 1280),\n (768, 1344),\n (768, 1536),\n (816, 1920),\n (832, 1152),\n (896, 1152),\n (896, 1088),\n (1024, 1024),\n (1024, 576),\n (1024, 768),\n (1080, 1920),\n (1440, 2560),\n (1088, 896),\n (1152, 832),\n (1152, 896),\n (1280, 768),\n (1344, 768),\n (1536, 640),\n (1536, 768),\n (1920, 816),\n (1920, 1080),\n (2560, 1440),\n]" }, { "identifier": "log_node_info", "path": "py/log.py", "snippet": "def log_node_info(node_name, message=None):\n \"\"\"Logs an info message.\"\"\"\n _log_node(COLORS_FG[\"CYAN\"], node_name, message)" }, { "identifier": "log_node_error", "path": "py/log.py", "snippet": "def log_node_error(node_name, message=None):\n \"\"\"Logs an warn message.\"\"\"\n _log_node(COLORS_FG[\"RED\"], node_name, message)" }, { "identifier": "log_node_warn", "path": "py/log.py", "snippet": "def log_node_warn(node_name, message=None):\n \"\"\"Logs an warn message.\"\"\"\n _log_node(COLORS_FG[\"YELLOW\"], node_name, message)" }, { "identifier": "log_node_success", "path": "py/log.py", "snippet": "def log_node_success(node_name, 
message=None):\n \"\"\"Logs a success message.\"\"\"\n _log_node(COLORS_FG[\"GREEN\"], node_name, message)" }, { "identifier": "process_with_loras", "path": "py/wildcards.py", "snippet": "def process_with_loras(wildcard_opt, model, clip, title=\"Positive\", seed=None, can_load_lora=True, pipe_lora_stack=[]):\n lora_name_cache = []\n\n pass1 = process(wildcard_opt, seed)\n loras = extract_lora_values(pass1)\n pass2 = remove_lora_tags(pass1)\n\n has_noodle_key = True if \"__\" in wildcard_opt else False\n has_loras = True if loras != [] else False\n show_wildcard_prompt = True if has_noodle_key or has_loras else False\n\n for lora_name, model_weight, clip_weight, lbw, lbw_a, lbw_b in loras:\n if (lora_name.split('.')[-1]) not in folder_paths.supported_pt_extensions:\n lora_name = lora_name+\".safetensors\"\n\n lora_name = resolve_lora_name(lora_name_cache, lora_name)\n\n path = folder_paths.get_full_path(\"loras\", lora_name)\n\n if path is not None:\n print(f\"LORA: {lora_name}: {model_weight}, {clip_weight}, LBW={lbw}, A={lbw_a}, B={lbw_b}\")\n\n def default_lora():\n return nodes.LoraLoader().load_lora(model, clip, lora_name, model_weight, clip_weight)\n\n if lbw is not None:\n cls = nodes.NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire']\n if can_load_lora:\n model, clip, _ = cls().doit(model, clip, lora_name, model_weight, clip_weight, False, 0, lbw_a, lbw_b, \"\", lbw)\n pipe_lora_stack.append({\n \"lora_name\": lora_name, \"model\": model, \"clip\": clip, \"lora_model_strength\": model_weight,\n \"lora_clip_strength\": clip_weight,\n \"lbw_a\": lbw_a,\n \"lbw_b\": lbw_b,\n \"lbw\": lbw\n })\n else:\n pipe_lora_stack.append({\"lora_name\": lora_name, \"model\": model, \"clip\": clip, \"lora_model_strength\": model_weight, \"lora_clip_strength\": clip_weight})\n if can_load_lora:\n model, clip = default_lora()\n else:\n print(f\"LORA NOT FOUND: {lora_name}\")\n\n # print(f\"{title}: {pass2}\")\n # print(f'{title}_decode:', pass1)\n\n return model, clip, pass2, pass1, show_wildcard_prompt, pipe_lora_stack" }, { "identifier": "get_wildcard_list", "path": "py/wildcards.py", "snippet": "def get_wildcard_list():\n return [f\"__{x}__\" for x in easy_wildcard_dict.keys()]" }, { "identifier": "sample_dpmpp_2s_ancestral", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_dpmpp_2s_ancestral(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"Ancestral sampling with DPM-Solver++(2S) second-order steps.\"\"\"\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n sigma_fn = lambda t: t.neg().exp()\n t_fn = lambda sigma: sigma.log().neg()\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)\n if callback is not None:\n callback({\"x\": x, \"i\": i, 
\"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n if sigma_down == 0:\n # Euler method\n d = to_d(x, sigmas[i], denoised)\n dt = sigma_down - sigmas[i]\n x = x + d * dt\n else:\n # DPM-Solver++(2S)\n t, t_next = t_fn(sigmas[i]), t_fn(sigma_down)\n r = 1 / 2\n h = t_next - t\n s = t + r * h\n x_2 = (sigma_fn(s) / sigma_fn(t)) * x - (-h * r).expm1() * denoised\n denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)\n x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_2\n # Noise addition\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n noise_sampler = default_noise_sampler(x)\n noise = noise_sampler(sigmas[i], sigmas[i + 1])\n x = x + noise * sigma_up * s_noise\n return x" }, { "identifier": "sample_dpmpp_2m_sde", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_dpmpp_2m_sde(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n solver_type=\"midpoint\",\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"DPM-Solver++(2M) SDE.\"\"\"\n\n if solver_type not in {\"heun\", \"midpoint\"}:\n raise ValueError(\"solver_type must be 'heun' or 'midpoint'\")\n\n seed = extra_args.get(\"seed\", None)\n sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n\n old_denoised = None\n h_last = None\n h = None\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n if sigmas[i + 1] == 0:\n # Denoising step\n x = denoised\n else:\n # DPM-Solver++(2M) SDE\n t, s = -sigmas[i].log(), -sigmas[i + 1].log()\n h = s - t\n eta_h = eta * h\n\n x = sigmas[i + 1] / sigmas[i] * (-eta_h).exp() * x + (-h - eta_h).expm1().neg() * denoised\n\n if old_denoised is not None:\n r = h_last / h\n if solver_type == \"heun\":\n x = x + ((-h - eta_h).expm1().neg() / (-h - eta_h) + 1) * (1 / r) * (denoised - old_denoised)\n elif solver_type == \"midpoint\":\n x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised)\n\n if eta:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n denoised = None # 次ステップとサイズがあわないのでとりあえずNoneにしておく。\n noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True)\n x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * 
sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise\n\n old_denoised = denoised\n h_last = h\n return x" }, { "identifier": "sample_lcm", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_lcm(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n noise_sampler=None,\n eta=None,\n s_noise=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n\n x = denoised\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n noise_sampler = default_noise_sampler(x)\n x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1])\n\n return x" }, { "identifier": "sample_euler_ancestral", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_euler_ancestral(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"Ancestral sampling with Euler method steps.\"\"\"\n extra_args = {} if extra_args is None else extra_args\n noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler\n s_in = x.new_ones([x.shape[0]])\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n d = to_d(x, sigmas[i], denoised)\n # Euler method\n dt = sigma_down - sigmas[i]\n x = x + d * dt\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n\n noise_sampler = 
default_noise_sampler(x)\n noise = noise_sampler(sigmas[i], sigmas[i + 1])\n x = x + noise * sigma_up * s_noise\n return x" }, { "identifier": "DynThresh", "path": "py/dynthres_core.py", "snippet": "class DynThresh:\n\n Modes = [\"Constant\", \"Linear Down\", \"Cosine Down\", \"Half Cosine Down\", \"Linear Up\", \"Cosine Up\", \"Half Cosine Up\", \"Power Up\", \"Power Down\", \"Linear Repeating\", \"Cosine Repeating\", \"Sawtooth\"]\n Startpoints = [\"MEAN\", \"ZERO\"]\n Variabilities = [\"AD\", \"STD\"]\n\n def __init__(self, mimic_scale, threshold_percentile, mimic_mode, mimic_scale_min, cfg_mode, cfg_scale_min, sched_val, experiment_mode, max_steps, separate_feature_channels, scaling_startpoint, variability_measure, interpolate_phi):\n self.mimic_scale = mimic_scale\n self.threshold_percentile = threshold_percentile\n self.mimic_mode = mimic_mode\n self.cfg_mode = cfg_mode\n self.max_steps = max_steps\n self.cfg_scale_min = cfg_scale_min\n self.mimic_scale_min = mimic_scale_min\n self.experiment_mode = experiment_mode\n self.sched_val = sched_val\n self.sep_feat_channels = separate_feature_channels\n self.scaling_startpoint = scaling_startpoint\n self.variability_measure = variability_measure\n self.interpolate_phi = interpolate_phi\n\n def interpret_scale(self, scale, mode, min):\n scale -= min\n max = self.max_steps - 1\n frac = self.step / max\n if mode == \"Constant\":\n pass\n elif mode == \"Linear Down\":\n scale *= 1.0 - frac\n elif mode == \"Half Cosine Down\":\n scale *= math.cos(frac)\n elif mode == \"Cosine Down\":\n scale *= math.cos(frac * 1.5707)\n elif mode == \"Linear Up\":\n scale *= frac\n elif mode == \"Half Cosine Up\":\n scale *= 1.0 - math.cos(frac)\n elif mode == \"Cosine Up\":\n scale *= 1.0 - math.cos(frac * 1.5707)\n elif mode == \"Power Up\":\n scale *= math.pow(frac, self.sched_val)\n elif mode == \"Power Down\":\n scale *= 1.0 - math.pow(frac, self.sched_val)\n elif mode == \"Linear Repeating\":\n portion = (frac * self.sched_val) % 1.0\n scale *= (0.5 - portion) * 2 if portion < 0.5 else (portion - 0.5) * 2\n elif mode == \"Cosine Repeating\":\n scale *= math.cos(frac * 6.28318 * self.sched_val) * 0.5 + 0.5\n elif mode == \"Sawtooth\":\n scale *= (frac * self.sched_val) % 1.0\n scale += min\n return scale\n\n def dynthresh(self, cond, uncond, cfg_scale, weights):\n mimic_scale = self.interpret_scale(self.mimic_scale, self.mimic_mode, self.mimic_scale_min)\n cfg_scale = self.interpret_scale(cfg_scale, self.cfg_mode, self.cfg_scale_min)\n # uncond shape is (batch, 4, height, width)\n conds_per_batch = cond.shape[0] / uncond.shape[0]\n assert conds_per_batch == int(conds_per_batch), \"Expected # of conds per batch to be constant across batches\"\n cond_stacked = cond.reshape((-1, int(conds_per_batch)) + uncond.shape[1:])\n\n ### Normal first part of the CFG Scale logic, basically\n diff = cond_stacked - uncond.unsqueeze(1)\n if weights is not None:\n diff = diff * weights\n relative = diff.sum(1)\n\n ### Get the normal result for both mimic and normal scale\n mim_target = uncond + relative * mimic_scale\n cfg_target = uncond + relative * cfg_scale\n ### If we weren't doing mimic scale, we'd just return cfg_target here\n\n ### Now recenter the values relative to their average rather than absolute, to allow scaling from average\n mim_flattened = mim_target.flatten(2)\n cfg_flattened = cfg_target.flatten(2)\n mim_means = mim_flattened.mean(dim=2).unsqueeze(2)\n cfg_means = cfg_flattened.mean(dim=2).unsqueeze(2)\n mim_centered = mim_flattened - mim_means\n 
cfg_centered = cfg_flattened - cfg_means\n\n if self.sep_feat_channels:\n if self.variability_measure == 'STD':\n mim_scaleref = mim_centered.std(dim=2).unsqueeze(2)\n cfg_scaleref = cfg_centered.std(dim=2).unsqueeze(2)\n else: # 'AD'\n mim_scaleref = mim_centered.abs().max(dim=2).values.unsqueeze(2)\n cfg_scaleref = torch.quantile(cfg_centered.abs(), self.threshold_percentile, dim=2).unsqueeze(2)\n\n else:\n if self.variability_measure == 'STD':\n mim_scaleref = mim_centered.std()\n cfg_scaleref = cfg_centered.std()\n else: # 'AD'\n mim_scaleref = mim_centered.abs().max()\n cfg_scaleref = torch.quantile(cfg_centered.abs(), self.threshold_percentile)\n\n if self.scaling_startpoint == 'ZERO':\n scaling_factor = mim_scaleref / cfg_scaleref\n result = cfg_flattened * scaling_factor\n\n else: # 'MEAN'\n if self.variability_measure == 'STD':\n cfg_renormalized = (cfg_centered / cfg_scaleref) * mim_scaleref\n else: # 'AD'\n ### Get the maximum value of all datapoints (with an optional threshold percentile on the uncond)\n max_scaleref = torch.maximum(mim_scaleref, cfg_scaleref)\n ### Clamp to the max\n cfg_clamped = cfg_centered.clamp(-max_scaleref, max_scaleref)\n ### Now shrink from the max to normalize and grow to the mimic scale (instead of the CFG scale)\n cfg_renormalized = (cfg_clamped / max_scaleref) * mim_scaleref\n\n ### Now add it back onto the averages to get into real scale again and return\n result = cfg_renormalized + cfg_means\n\n actual_res = result.unflatten(2, mim_target.shape[2:])\n\n if self.interpolate_phi != 1.0:\n actual_res = actual_res * self.interpolate_phi + cfg_target * (1.0 - self.interpolate_phi)\n\n if self.experiment_mode == 1:\n num = actual_res.cpu().numpy()\n for y in range(0, 64):\n for x in range (0, 64):\n if num[0][0][y][x] > 1.0:\n num[0][1][y][x] *= 0.5\n if num[0][1][y][x] > 1.0:\n num[0][1][y][x] *= 0.5\n if num[0][2][y][x] > 1.5:\n num[0][2][y][x] *= 0.5\n actual_res = torch.from_numpy(num).to(device=uncond.device)\n elif self.experiment_mode == 2:\n num = actual_res.cpu().numpy()\n for y in range(0, 64):\n for x in range (0, 64):\n over_scale = False\n for z in range(0, 4):\n if abs(num[0][z][y][x]) > 1.5:\n over_scale = True\n if over_scale:\n for z in range(0, 4):\n num[0][z][y][x] *= 0.7\n actual_res = torch.from_numpy(num).to(device=uncond.device)\n elif self.experiment_mode == 3:\n coefs = torch.tensor([\n # R G B W\n [0.298, 0.207, 0.208, 0.0], # L1\n [0.187, 0.286, 0.173, 0.0], # L2\n [-0.158, 0.189, 0.264, 0.0], # L3\n [-0.184, -0.271, -0.473, 1.0], # L4\n ], device=uncond.device)\n res_rgb = torch.einsum(\"laxy,ab -> lbxy\", actual_res, coefs)\n max_r, max_g, max_b, max_w = res_rgb[0][0].max(), res_rgb[0][1].max(), res_rgb[0][2].max(), res_rgb[0][3].max()\n max_rgb = max(max_r, max_g, max_b)\n print(f\"test max = r={max_r}, g={max_g}, b={max_b}, w={max_w}, rgb={max_rgb}\")\n if self.step / (self.max_steps - 1) > 0.2:\n if max_rgb < 2.0 and max_w < 3.0:\n res_rgb /= max_rgb / 2.4\n else:\n if max_rgb > 2.4 and max_w > 3.0:\n res_rgb /= max_rgb / 2.4\n actual_res = torch.einsum(\"laxy,ab -> lbxy\", res_rgb, coefs.inverse())\n\n return actual_res" } ]
import sys
import os
import re
import json
import time
import math
import torch
import psutil
import random
import datetime
import comfy.sd
import comfy.utils
import numpy as np
import folder_paths
import comfy.samplers
import comfy.controlnet
import latent_preview
import comfy.model_base
import comfy.model_management
from pathlib import Path
from comfy.sd import CLIP, VAE
from comfy.cli_args import args
from urllib.request import urlopen
from collections import defaultdict
from PIL.PngImagePlugin import PngInfo
from PIL import Image, ImageDraw, ImageFont
from comfy.model_patcher import ModelPatcher
from comfy_extras.chainner_models import model_loading
from typing import Dict, List, Optional, Tuple, Union, Any
from .adv_encode import advanced_encode, advanced_encode_XL
from server import PromptServer
from nodes import VAELoader, MAX_RESOLUTION, RepeatLatentBatch, NODE_CLASS_MAPPINGS as ALL_NODE_CLASS_MAPPINGS, ConditioningSetMask
from comfy_extras.nodes_mask import LatentCompositeMasked
from .config import BASE_RESOLUTIONS
from .log import log_node_info, log_node_error, log_node_warn, log_node_success
from .wildcards import process_with_loras, get_wildcard_list
from comfy_extras.nodes_stable3d import camera_embeddings
from .gradual_latent_hires_fix import sample_dpmpp_2s_ancestral, sample_dpmpp_2m_sde, sample_lcm, sample_euler_ancestral
from .dynthres_core import DynThresh
10,885
# ttNl(f'{CC.GREY}X: {x_value_label}, Y: {y_value_label}').t( # f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value, y_value) self.num += 1 else: # ttNl(f'{CC.GREY}X: {x_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value) self.num += 1 # Rearrange latent array to match preview image grid self.latents_plot = self.rearrange_tensors(self.latents_plot, self.num_cols, self.num_rows) # Concatenate the tensors along the first dimension (dim=0) self.latents_plot = torch.cat(self.latents_plot, dim=0) return self.latents_plot def plot_images_and_labels(self): # Calculate the background dimensions bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions() # Create the white background image background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255)) output_image = [] for row_index in range(self.num_rows): x_offset = x_offset_initial for col_index in range(self.num_cols): index = col_index * self.num_rows + row_index img = self.image_list[index] output_image.append(sampler.pil2tensor(img)) background.paste(img, (x_offset, y_offset)) # Handle X label if row_index == 0 and self.x_type != "None": label_bg = self.create_label(img, self.x_label[col_index], int(48 * img.width / 512)) label_y = (y_offset - label_bg.height) // 2 background.alpha_composite(label_bg, (x_offset, label_y)) # Handle Y label if col_index == 0 and self.y_type != "None": label_bg = self.create_label(img, self.y_label[row_index], int(48 * img.height / 512), False) label_bg = label_bg.rotate(90, expand=True) label_x = (x_offset - label_bg.width) // 2 label_y = y_offset + (img.height - label_bg.height) // 2 background.alpha_composite(label_bg, (label_x, label_y)) x_offset += img.width + self.grid_spacing y_offset += img.height + self.grid_spacing return (sampler.pil2tensor(background), output_image) easyCache = easyLoader() sampler = easySampler() def check_link_to_clip(node_id, clip_id, visited=None, node=None): """Check if a given node links directly or indirectly to a loader node.""" if visited is None: visited = set() if node_id in visited: return False visited.add(node_id) if "pipe" in node["inputs"]: link_ids = node["inputs"]["pipe"] for id in link_ids: if id != 0 and id == str(clip_id): return True return False def find_nearest_steps(clip_id, prompt): """Find the nearest KSampler or preSampling node that references the given id.""" for id in prompt: node = prompt[id] if "Sampler" in node["class_type"] or "sampler" in node["class_type"] or "Sampling" in node["class_type"]: # Check if this KSampler node directly or indirectly references the given CLIPTextEncode node if check_link_to_clip(id, clip_id, None, node): steps = node["inputs"]["steps"] if "steps" in node["inputs"] else 1 return steps return 1 def find_wildcards_seed(text, prompt): if "__" in text: for i in prompt: if "wildcards" in prompt[i]['class_type'] and text == prompt[i]['inputs']['text']: return prompt[i]['inputs']['seed_num'] if "seed_num" in prompt[i]['inputs'] else None else: return None class easySave: def 
__init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False, output_dir=folder_paths.get_temp_directory()): self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None self.overwrite_existing = overwrite_existing self.my_unique_id = my_unique_id self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.type = 'temp' self.output_dir = output_dir if self.output_dir != folder_paths.get_temp_directory(): self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id) if not os.path.exists(self.output_dir): self._create_directory(self.output_dir) @staticmethod def _create_directory(folder: str): """Try to create the directory and log the status.""" log_node_warn("", f"Folder {folder} does not exist. Attempting to create...") if not os.path.exists(folder): try: os.makedirs(folder)
# 加载器 class easyLoader: def __init__(self): self.loaded_objects = { "ckpt": defaultdict(tuple), # {ckpt_name: (model, ...)} "clip": defaultdict(tuple), "clip_vision": defaultdict(tuple), "bvae": defaultdict(tuple), "vae": defaultdict(object), "lora": defaultdict(dict), # {lora_name: {UID: (model_lora, clip_lora)}} } self.memory_threshold = self.determine_memory_threshold(0.7) def clean_values(self, values: str): original_values = values.split("; ") cleaned_values = [] for value in original_values: cleaned_value = value.strip(';').strip() if cleaned_value == "": continue try: cleaned_value = int(cleaned_value) except ValueError: try: cleaned_value = float(cleaned_value) except ValueError: pass cleaned_values.append(cleaned_value) return cleaned_values def clear_unused_objects(self, desired_names: set, object_type: str): keys = set(self.loaded_objects[object_type].keys()) for key in keys - desired_names: del self.loaded_objects[object_type][key] def get_input_value(self, entry, key): val = entry["inputs"][key] return val if isinstance(val, str) else val[0] def process_pipe_loader(self, entry, desired_ckpt_names, desired_vae_names, desired_lora_names, desired_lora_settings, num_loras=3, suffix=""): for idx in range(1, num_loras + 1): lora_name_key = f"{suffix}lora{idx}_name" desired_lora_names.add(self.get_input_value(entry, lora_name_key)) setting = f'{self.get_input_value(entry, lora_name_key)};{entry["inputs"][f"{suffix}lora{idx}_model_strength"]};{entry["inputs"][f"{suffix}lora{idx}_clip_strength"]}' desired_lora_settings.add(setting) desired_ckpt_names.add(self.get_input_value(entry, f"{suffix}ckpt_name")) desired_vae_names.add(self.get_input_value(entry, f"{suffix}vae_name")) def update_loaded_objects(self, prompt): desired_ckpt_names = set() desired_vae_names = set() desired_lora_names = set() desired_lora_settings = set() for entry in prompt.values(): class_type = entry["class_type"] if class_type == "easy a1111Loader" or class_type == "easy comfyLoader": lora_name = self.get_input_value(entry, "lora_name") desired_lora_names.add(lora_name) setting = f'{lora_name};{entry["inputs"]["lora_model_strength"]};{entry["inputs"]["lora_clip_strength"]}' desired_lora_settings.add(setting) desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name")) desired_vae_names.add(self.get_input_value(entry, "vae_name")) elif class_type == "easy zero123Loader" or class_type == 'easy svdLoader': desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name")) desired_vae_names.add(self.get_input_value(entry, "vae_name")) elif class_type == "easy XYInputs: ModelMergeBlocks": desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_1")) desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_2")) vae_use = self.get_input_value(entry, "vae_use") if vae_use != 'Use Model 1' and vae_use != 'Use Model 2': desired_vae_names.add(vae_use) object_types = ["ckpt", "clip", "bvae", "vae", "lora"] for object_type in object_types: desired_names = desired_ckpt_names if object_type in ["ckpt", "clip", "bvae"] else desired_vae_names if object_type == "vae" else desired_lora_names self.clear_unused_objects(desired_names, object_type) def add_to_cache(self, obj_type, key, value): """ Add an item to the cache with the current timestamp. """ timestamped_value = (value, time.time()) self.loaded_objects[obj_type][key] = timestamped_value def determine_memory_threshold(self, percentage=0.8): """ Determines the memory threshold as a percentage of the total available memory. 
Args: - percentage (float): The fraction of total memory to use as the threshold. Should be a value between 0 and 1. Default is 0.8 (80%). Returns: - memory_threshold (int): Memory threshold in bytes. """ total_memory = psutil.virtual_memory().total memory_threshold = total_memory * percentage return memory_threshold def get_memory_usage(self): """ Returns the memory usage of the current process in bytes. """ process = psutil.Process(os.getpid()) return process.memory_info().rss def eviction_based_on_memory(self): """ Evicts objects from cache based on memory usage and priority. """ current_memory = self.get_memory_usage() if current_memory < self.memory_threshold: return eviction_order = ["vae", "lora", "bvae", "clip", "ckpt"] for obj_type in eviction_order: if current_memory < self.memory_threshold: break # Sort items based on age (using the timestamp) items = list(self.loaded_objects[obj_type].items()) items.sort(key=lambda x: x[1][1]) # Sorting by timestamp for item in items: if current_memory < self.memory_threshold: break del self.loaded_objects[obj_type][item[0]] current_memory = self.get_memory_usage() def load_checkpoint(self, ckpt_name, config_name=None, load_vision=False): cache_name = ckpt_name if config_name not in [None, "Default"]: cache_name = ckpt_name + "_" + config_name if cache_name in self.loaded_objects["ckpt"]: cache_out = self.loaded_objects["clip_vision"][cache_name][0] if load_vision else self.loaded_objects["clip"][cache_name][0] return self.loaded_objects["ckpt"][cache_name][0], cache_out, self.loaded_objects["bvae"][cache_name][0] ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) output_clip = False if load_vision else True output_clipvision = True if load_vision else False if config_name not in [None, "Default"]: config_path = folder_paths.get_full_path("configs", config_name) loaded_ckpt = comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision, embedding_directory=folder_paths.get_folder_paths("embeddings")) else: loaded_ckpt = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision, embedding_directory=folder_paths.get_folder_paths("embeddings")) self.add_to_cache("ckpt", cache_name, loaded_ckpt[0]) self.add_to_cache("bvae", cache_name, loaded_ckpt[2]) if load_vision: out = loaded_ckpt[3] self.add_to_cache("clip_vision", cache_name, out) else: out = loaded_ckpt[1] self.add_to_cache("clip", cache_name, loaded_ckpt[1]) self.eviction_based_on_memory() return loaded_ckpt[0], out, loaded_ckpt[2] def load_vae(self, vae_name): if vae_name in self.loaded_objects["vae"]: return self.loaded_objects["vae"][vae_name][0] vae_path = folder_paths.get_full_path("vae", vae_name) sd = comfy.utils.load_torch_file(vae_path) loaded_vae = comfy.sd.VAE(sd=sd) self.add_to_cache("vae", vae_name, loaded_vae) self.eviction_based_on_memory() return loaded_vae def load_lora(self, lora_name, model, clip, strength_model, strength_clip): model_hash = str(model)[44:-1] clip_hash = str(clip)[25:-1] unique_id = f'{model_hash};{clip_hash};{lora_name};{strength_model};{strength_clip}' if unique_id in self.loaded_objects["lora"] and unique_id in self.loaded_objects["lora"][lora_name]: return self.loaded_objects["lora"][unique_id][0] lora_path = folder_paths.get_full_path("loras", lora_name) lora = comfy.utils.load_torch_file(lora_path, safe_load=True) model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, 
strength_clip) self.add_to_cache("lora", unique_id, (model_lora, clip_lora)) self.eviction_based_on_memory() return model_lora, clip_lora # 采样器 class easySampler: def __init__(self): self.last_helds: dict[str, list] = { "results": [], "pipe_line": [], } @staticmethod def tensor2pil(image: torch.Tensor) -> Image.Image: """Convert a torch tensor to a PIL image.""" return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) @staticmethod def pil2tensor(image: Image.Image) -> torch.Tensor: """Convert a PIL image to a torch tensor.""" return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) @staticmethod def enforce_mul_of_64(d): d = int(d) if d <= 7: d = 8 leftover = d % 8 # 8 is the number of pixels per byte if leftover != 0: # if the number of pixels is not a multiple of 8 if (leftover < 4): # if the number of pixels is less than 4 d -= leftover # remove the leftover pixels else: # if the number of pixels is more than 4 d += 8 - leftover # add the leftover pixels return int(d) @staticmethod def safe_split(to_split: str, delimiter: str) -> List[str]: """Split the input string and return a list of non-empty parts.""" parts = to_split.split(delimiter) parts = [part for part in parts if part not in ('', ' ', ' ')] while len(parts) < 2: parts.append('None') return parts def common_ksampler(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, preview_latent=True, disable_pbar=False): device = comfy.model_management.get_torch_device() latent_image = latent["samples"] if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: batch_inds = latent["batch_index"] if "batch_index" in latent else None noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) noise_mask = None if "noise_mask" in latent: noise_mask = latent["noise_mask"] preview_format = "JPEG" if preview_format not in ["JPEG", "PNG"]: preview_format = "JPEG" previewer = False if preview_latent: previewer = latent_preview.get_previewer(device, model.model.latent_format) pbar = comfy.utils.ProgressBar(steps) def callback(step, x0, x, total_steps): preview_bytes = None if previewer: preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) pbar.update_absolute(step + 1, total_steps, preview_bytes) samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) out = latent.copy() out["samples"] = samples return out def custom_ksampler(self, model, seed, steps, cfg, _sampler, sigmas, positive, negative, latent, disable_noise=False, preview_latent=True, disable_pbar=False): device = comfy.model_management.get_torch_device() latent_image = latent["samples"] if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: batch_inds = latent["batch_index"] if "batch_index" in latent else None noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) noise_mask = None if "noise_mask" in latent: noise_mask = latent["noise_mask"] preview_format = "JPEG" if preview_format not in ["JPEG", "PNG"]: preview_format = "JPEG" previewer = False if 
preview_latent: previewer = latent_preview.get_previewer(device, model.model.latent_format) pbar = comfy.utils.ProgressBar(steps) def callback(step, x0, x, total_steps): preview_bytes = None if previewer: preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) pbar.update_absolute(step + 1, total_steps, preview_bytes) samples = comfy.sample.sample_custom(model, noise, cfg, _sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) out = latent.copy() out["samples"] = samples return out def get_value_by_id(self, key: str, my_unique_id: Any) -> Optional[Any]: """Retrieve value by its associated ID.""" try: for value, id_ in self.last_helds[key]: if id_ == my_unique_id: return value except KeyError: return None def update_value_by_id(self, key: str, my_unique_id: Any, new_value: Any) -> Union[bool, None]: """Update the value associated with a given ID. Return True if updated, False if appended, None if key doesn't exist.""" try: for i, (value, id_) in enumerate(self.last_helds[key]): if id_ == my_unique_id: self.last_helds[key][i] = (new_value, id_) return True self.last_helds[key].append((new_value, my_unique_id)) return False except KeyError: return False def upscale(self, samples, upscale_method, scale_by, crop): s = samples.copy() width = self.enforce_mul_of_64(round(samples["samples"].shape[3] * scale_by)) height = self.enforce_mul_of_64(round(samples["samples"].shape[2] * scale_by)) if (width > MAX_RESOLUTION): width = MAX_RESOLUTION if (height > MAX_RESOLUTION): height = MAX_RESOLUTION s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, crop) return (s,) def handle_upscale(self, samples: dict, upscale_method: str, factor: float, crop: bool) -> dict: """Upscale the samples if the upscale_method is not set to 'None'.""" if upscale_method != "None": samples = self.upscale(samples, upscale_method, factor, crop)[0] return samples def init_state(self, my_unique_id: Any, key: str, default: Any) -> Any: """Initialize the state by either fetching the stored value or setting a default.""" value = self.get_value_by_id(key, my_unique_id) if value is not None: return value return default def get_output(self, pipe: dict,) -> Tuple: """Return a tuple of various elements fetched from the input pipe dictionary.""" return ( pipe, pipe.get("images"), pipe.get("model"), pipe.get("positive"), pipe.get("negative"), pipe.get("samples"), pipe.get("vae"), pipe.get("clip"), pipe.get("seed"), ) def get_output_sdxl(self, sdxl_pipe: dict) -> Tuple: """Return a tuple of various elements fetched from the input sdxl_pipe dictionary.""" return ( sdxl_pipe, sdxl_pipe.get("model"), sdxl_pipe.get("positive"), sdxl_pipe.get("negative"), sdxl_pipe.get("vae"), sdxl_pipe.get("refiner_model"), sdxl_pipe.get("refiner_positive"), sdxl_pipe.get("refiner_negative"), sdxl_pipe.get("refiner_vae"), sdxl_pipe.get("samples"), sdxl_pipe.get("clip"), sdxl_pipe.get("images"), sdxl_pipe.get("seed") ) # XY图表 class easyXYPlot: def __init__(self, xyPlotData, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id): self.x_node_type, self.x_type = easySampler.safe_split(xyPlotData.get("x_axis"), ': ') self.y_node_type, self.y_type = easySampler.safe_split(xyPlotData.get("y_axis"), ': ') self.x_values = xyPlotData.get("x_vals") if self.x_type != "None" else [] self.y_values = xyPlotData.get("y_vals") if self.y_type != "None" else [] self.grid_spacing = xyPlotData.get("grid_spacing") self.latent_id = 0 
self.output_individuals = xyPlotData.get("output_individuals") self.x_label, self.y_label = [], [] self.max_width, self.max_height = 0, 0 self.latents_plot = [] self.image_list = [] self.num_cols = len(self.x_values) if len(self.x_values) > 0 else 1 self.num_rows = len(self.y_values) if len(self.y_values) > 0 else 1 self.total = self.num_cols * self.num_rows self.num = 0 self.save_prefix = save_prefix self.image_output = image_output self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.my_unique_id = my_unique_id # Helper Functions @staticmethod def define_variable(plot_image_vars, value_type, value, index): plot_image_vars[value_type] = value if value_type in ["seed", "Seeds++ Batch"]: value_label = f"{value}" else: value_label = f"{value_type}: {value}" if "ControlNet" in value_type: if "," in value: line = value.split(',') value_label = f"{value_type}: {line[2]}" if value_type in ["ModelMergeBlocks"]: if ":" in value: line = value.split(':') value_label = f"{line[0]}" elif len(value) > 16: value_label = f"ModelMergeBlocks {index + 1}" else: value_label = f"MMB: {value}" if value_type in ["Positive Prompt S/R"]: value_label = f"pos prompt {index + 1}" if index>0 else f"pos prompt" if value_type in ["Negative Prompt S/R"]: value_label = f"neg prompt {index + 1}" if index>0 else f"neg prompt" if value_type in ["steps", "cfg", "denoise", "clip_skip", "lora_model_strength", "lora_clip_strength"]: value_label = f"{value_type}: {value}" if value_type == "positive": value_label = f"pos prompt {index + 1}" elif value_type == "negative": value_label = f"neg prompt {index + 1}" return plot_image_vars, value_label @staticmethod def get_font(font_size): return ImageFont.truetype(str(Path(os.path.join(Path(__file__).parent.parent, 'resources/OpenSans-Medium.ttf'))), font_size) @staticmethod def update_label(label, value, num_items): if len(label) < num_items: return [*label, value] return label @staticmethod def rearrange_tensors(latent, num_cols, num_rows): new_latent = [] for i in range(num_rows): for j in range(num_cols): index = j * num_rows + i new_latent.append(latent[index]) return new_latent def calculate_background_dimensions(self): border_size = int((self.max_width // 8) * 1.5) if self.y_type != "None" or self.x_type != "None" else 0 bg_width = self.num_cols * (self.max_width + self.grid_spacing) - self.grid_spacing + border_size * ( self.y_type != "None") bg_height = self.num_rows * (self.max_height + self.grid_spacing) - self.grid_spacing + border_size * ( self.x_type != "None") x_offset_initial = border_size if self.y_type != "None" else 0 y_offset = border_size if self.x_type != "None" else 0 return bg_width, bg_height, x_offset_initial, y_offset def adjust_font_size(self, text, initial_font_size, label_width): font = self.get_font(initial_font_size) text_width, _ = font.getsize(text) scaling_factor = 0.9 if text_width > (label_width * scaling_factor): return int(initial_font_size * (label_width / text_width) * scaling_factor) else: return initial_font_size def create_label(self, img, text, initial_font_size, is_x_label=True, max_font_size=70, min_font_size=10): label_width = img.width if is_x_label else img.height # Adjust font size font_size = self.adjust_font_size(text, initial_font_size, label_width) font_size = min(max_font_size, font_size) # Ensure font isn't too large font_size = max(min_font_size, font_size) # Ensure font isn't too small label_height = int(font_size * 1.5) if is_x_label else font_size label_bg = Image.new('RGBA', (label_width, label_height), color=(255, 
255, 255, 0)) d = ImageDraw.Draw(label_bg) font = self.get_font(font_size) # Check if text will fit, if not insert ellipsis and reduce text if d.textsize(text, font=font)[0] > label_width: while d.textsize(text + '...', font=font)[0] > label_width and len(text) > 0: text = text[:-1] text = text + '...' # Compute text width and height for multi-line text text_lines = text.split('\n') text_widths, text_heights = zip(*[d.textsize(line, font=font) for line in text_lines]) max_text_width = max(text_widths) total_text_height = sum(text_heights) # Compute position for each line of text lines_positions = [] current_y = 0 for line, line_width, line_height in zip(text_lines, text_widths, text_heights): text_x = (label_width - line_width) // 2 text_y = current_y + (label_height - total_text_height) // 2 current_y += line_height lines_positions.append((line, (text_x, text_y))) # Draw each line of text for line, (text_x, text_y) in lines_positions: d.text((text_x, text_y), line, fill='black', font=font) return label_bg def sample_plot_image(self, plot_image_vars, samples, preview_latent, latents_plot, image_list, disable_noise, start_step, last_step, force_full_denoise, x_value=None, y_value=None): model, clip, vae, positive, negative, seed, steps, cfg = None, None, None, None, None, None, None, None sampler_name, scheduler, denoise = None, None, None # 高级用法 if plot_image_vars["x_node_type"] == "advanced" or plot_image_vars["y_node_type"] == "advanced": if self.x_type == "Seeds++ Batch" or self.y_type == "Seeds++ Batch": seed = int(x_value) if self.x_type == "Seeds++ Batch" else int(y_value) if self.x_type == "Steps" or self.y_type == "Steps": steps = int(x_value) if self.x_type == "Steps" else int(y_value) if self.x_type == "StartStep" or self.y_type == "StartStep": start_step = int(x_value) if self.x_type == "StartStep" else int(y_value) if self.x_type == "EndStep" or self.y_type == "EndStep": last_step = int(x_value) if self.x_type == "EndStep" else int(y_value) if self.x_type == "CFG Scale" or self.y_type == "CFG Scale": cfg = float(x_value) if self.x_type == "CFG Scale" else float(y_value) if self.x_type == "Sampler" or self.y_type == "Sampler" or self.y_type == "Sampler & Scheduler": sampler_name = float(x_value) if self.x_type == "Sampler" or self.x_type == "Sampler & Scheduler" else float(y_value) if self.x_type == "Scheduler" or self.y_type == "Scheduler" or self.y_type == "Sampler & Scheduler": scheduler = float(x_value) if self.x_type == "Scheduler" or self.x_type == "Sampler & Scheduler" else float(y_value) if self.x_type == "Denoise" or self.y_type == "Denoise": denoise = float(x_value) if self.x_type == "Denoise" else float(y_value) # 模型叠加 if self.x_type == "ModelMergeBlocks" or self.y_type == "ModelMergeBlocks": ckpt_name_1, ckpt_name_2 = plot_image_vars['models'] model1, clip1, vae1 = easyCache.load_checkpoint(ckpt_name_1) model2, clip2, vae2 = easyCache.load_checkpoint(ckpt_name_2) xy_values = x_value if self.x_type == "ModelMergeBlocks" else y_value if ":" in xy_values: xy_line = xy_values.split(':') xy_values = xy_line[1] xy_arrs = xy_values.split(',') # ModelMergeBlocks if len(xy_arrs) == 3: input, middle, out = xy_arrs kwargs = { "input": input, "middle": middle, "out": out } elif len(xy_arrs) == 30: kwargs = {} kwargs["time_embed."] = xy_arrs[0] kwargs["label_emb."] = xy_arrs[1] for i in range(12): kwargs["input_blocks.{}.".format(i)] = xy_arrs[2+i] for i in range(3): kwargs["middle_block.{}.".format(i)] = xy_arrs[14+i] for i in range(12): kwargs["output_blocks.{}.".format(i)] = 
xy_arrs[17+i] kwargs["out."] = xy_arrs[29] else: raise Exception("ModelMergeBlocks weight length error") default_ratio = next(iter(kwargs.values())) m = model1.clone() kp = model2.get_key_patches("diffusion_model.") for k in kp: ratio = float(default_ratio) k_unet = k[len("diffusion_model."):] last_arg_size = 0 for arg in kwargs: if k_unet.startswith(arg) and last_arg_size < len(arg): ratio = float(kwargs[arg]) last_arg_size = len(arg) m.add_patches({k: kp[k]}, 1.0 - ratio, ratio) vae_use = plot_image_vars['vae_use'] clip = clip2 if vae_use == 'Use Model 2' else clip1 if vae_use == 'Use Model 2': vae = vae2 elif vae_use == 'Use Model 1': vae = vae1 else: (vae,) = VAELoader().load_vae(vae_use) model = m # 如果存在lora_stack叠加lora optional_lora_stack = plot_image_vars['lora_stack'] if optional_lora_stack is not None and optional_lora_stack != []: for lora in optional_lora_stack: lora_name = lora["lora_name"] model = model if model is not None else lora["model"] clip = clip if clip is not None else lora["clip"] lora_model_strength = lora["lora_model_strength"] lora_clip_strength = lora["lora_clip_strength"] if "lbw" in lora: lbw = lora["lbw"] lbw_a = lora["lbw_a"] lbw_b = lora["lbw_b"] cls = ALL_NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire'] model, clip, _ = cls().doit(model, clip, lora_name, lora_model_strength, lora_clip_strength, False, 0, lbw_a, lbw_b, "", lbw) model, clip = easyCache.load_lora(lora_name, model, clip, lora_model_strength, lora_clip_strength) # 处理clip clip = clip.clone() if plot_image_vars['clip_skip'] != 0: clip.clip_layer(plot_image_vars['clip_skip']) # 提示词 if "Positive" in self.x_type or "Positive" in self.y_type: if self.x_type == 'Positive Prompt S/R' or self.y_type == 'Positive Prompt S/R': positive = x_value if self.x_type == "Positive Prompt S/R" else y_value if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] clip = clip if clip is not None else plot_image_vars["clip"] positive, = cls().encode(clip, positive, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception( f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: clip = clip if clip is not None else plot_image_vars["clip"] positive, positive_pooled = advanced_encode(clip, positive, plot_image_vars['positive_token_normalization'], plot_image_vars[ 'positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") positive = [[positive, {"pooled_output": positive_pooled}]] if "Negative" in self.x_type or "Negative" in self.y_type: if self.x_type == 'Negative Prompt S/R' or self.y_type == 'Negative Prompt S/R': negative = x_value if self.x_type == "Negative Prompt S/R" else y_value if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] clip = clip if clip is not None else plot_image_vars["clip"] negative, = cls().encode(clip, negative, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception( f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: clip = clip if clip is not None else plot_image_vars["clip"] negative, negative_pooled = advanced_encode(clip, negative, plot_image_vars['negative_token_normalization'], plot_image_vars[ 'negative_weight_interpretation'], w_max=1.0, 
apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] # ControlNet if "ControlNet" in self.x_type or "ControlNet" in self.y_type: _pipe = { "model": model if model is not None else plot_image_vars["model"], "positive": positive if positive is not None else plot_image_vars["positive_cond"], "negative": negative if negative is not None else plot_image_vars["negative_cond"], "vae": vae if vae is not None else plot_image_vars['vae'], "clip": clip if clip is not None else plot_image_vars['clip'], "samples": None, "images": None, "loader_settings": {} } cnet = plot_image_vars["cnet"] if "cnet" in plot_image_vars else None if cnet: strength, start_percent, end_percent = x_value.split(',') if "ControlNet" in self.x_type else y_value.split(',') strength = float(strength) start_percent = float(start_percent) end_percent = float(end_percent) for index, item in enumerate(cnet): control_net_names = item[0] image = item[1] for idx, control_net_name in enumerate(control_net_names): # print(control_net_name) _pipe, = controlnetAdvanced().controlnetApply(_pipe, image, control_net_name, None, strength, start_percent, end_percent) positive = _pipe['positive'] negative = _pipe['negative'] del _pipe # 简单用法 if plot_image_vars["x_node_type"] == "loader" or plot_image_vars["y_node_type"] == "loader": model, clip, vae = easyCache.load_checkpoint(plot_image_vars['ckpt_name']) if plot_image_vars['lora_name'] != "None": model, clip = easyCache.load_lora(plot_image_vars['lora_name'], model, clip, plot_image_vars['lora_model_strength'], plot_image_vars['lora_clip_strength']) # Check for custom VAE if plot_image_vars['vae_name'] not in ["Baked-VAE", "Baked VAE"]: vae = easyCache.load_vae(plot_image_vars['vae_name']) # CLIP skip if not clip: raise Exception("No CLIP found") clip = clip.clone() clip.clip_layer(plot_image_vars['clip_skip']) if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] positive, = cls().encode(clip, plot_image_vars['positive'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) negative, = cls().encode(clip, plot_image_vars['negative'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception(f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: positive, positive_pooled = advanced_encode(clip, plot_image_vars['positive'], plot_image_vars['positive_token_normalization'], plot_image_vars['positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") positive = [[positive, {"pooled_output": positive_pooled}]] negative, negative_pooled = advanced_encode(clip, plot_image_vars['negative'], plot_image_vars['negative_token_normalization'], plot_image_vars['negative_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] model = model if model is not None else plot_image_vars["model"] clip = clip if clip is not None else plot_image_vars["clip"] vae = vae if vae is not None else plot_image_vars["vae"] positive = positive if positive is not None else plot_image_vars["positive_cond"] negative = negative if negative is not None else plot_image_vars["negative_cond"] seed = seed if seed is not None else plot_image_vars["seed"] steps = steps if steps is not None else plot_image_vars["steps"] cfg = cfg if cfg is not None else plot_image_vars["cfg"] sampler_name = 
sampler_name if sampler_name is not None else plot_image_vars["sampler_name"] scheduler = scheduler if scheduler is not None else plot_image_vars["scheduler"] denoise = denoise if denoise is not None else plot_image_vars["denoise"] # Sample samples = sampler.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise=denoise, disable_noise=disable_noise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise) # Decode images and store latent = samples["samples"] # Add the latent tensor to the tensors list latents_plot.append(latent) # Decode the image image = vae.decode(latent).cpu() if self.output_individuals in [True, "True"]: easy_save = easySave(self.my_unique_id, self.prompt, self.extra_pnginfo) easy_save.images(image, self.save_prefix, self.image_output, group_id=self.num) # Convert the image from tensor to PIL Image and add it to the list pil_image = easySampler.tensor2pil(image) image_list.append(pil_image) # Update max dimensions self.max_width = max(self.max_width, pil_image.width) self.max_height = max(self.max_height, pil_image.height) # Return the touched variables return image_list, self.max_width, self.max_height, latents_plot # Process Functions def validate_xy_plot(self): if self.x_type == 'None' and self.y_type == 'None': log_node_warn(f'easyKsampler[{self.my_unique_id}]','No Valid Plot Types - Reverting to default sampling...') return False else: return True def get_latent(self, samples): # Extract the 'samples' tensor from the dictionary latent_image_tensor = samples["samples"] # Split the tensor into individual image tensors image_tensors = torch.split(latent_image_tensor, 1, dim=0) # Create a list of dictionaries containing the individual image tensors latent_list = [{'samples': image} for image in image_tensors] # Set latent only to the first latent of batch if self.latent_id >= len(latent_list): log_node_warn(f'easy kSampler[{self.my_unique_id}]',f'The selected latent_id ({self.latent_id}) is out of range.') log_node_warn(f'easy kSampler[{self.my_unique_id}]', f'Automatically setting the latent_id to the last image in the list (index: {len(latent_list) - 1}).') self.latent_id = len(latent_list) - 1 return latent_list[self.latent_id] def get_labels_and_sample(self, plot_image_vars, latent_image, preview_latent, start_step, last_step, force_full_denoise, disable_noise): for x_index, x_value in enumerate(self.x_values): plot_image_vars, x_value_label = self.define_variable(plot_image_vars, self.x_type, x_value, x_index) self.x_label = self.update_label(self.x_label, x_value_label, len(self.x_values)) if self.y_type != 'None': for y_index, y_value in enumerate(self.y_values): plot_image_vars, y_value_label = self.define_variable(plot_image_vars, self.y_type, y_value, y_index) self.y_label = self.update_label(self.y_label, y_value_label, len(self.y_values)) # ttNl(f'{CC.GREY}X: {x_value_label}, Y: {y_value_label}').t( # f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value, y_value) self.num += 1 else: # ttNl(f'{CC.GREY}X: {x_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, 
self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value) self.num += 1 # Rearrange latent array to match preview image grid self.latents_plot = self.rearrange_tensors(self.latents_plot, self.num_cols, self.num_rows) # Concatenate the tensors along the first dimension (dim=0) self.latents_plot = torch.cat(self.latents_plot, dim=0) return self.latents_plot def plot_images_and_labels(self): # Calculate the background dimensions bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions() # Create the white background image background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255)) output_image = [] for row_index in range(self.num_rows): x_offset = x_offset_initial for col_index in range(self.num_cols): index = col_index * self.num_rows + row_index img = self.image_list[index] output_image.append(sampler.pil2tensor(img)) background.paste(img, (x_offset, y_offset)) # Handle X label if row_index == 0 and self.x_type != "None": label_bg = self.create_label(img, self.x_label[col_index], int(48 * img.width / 512)) label_y = (y_offset - label_bg.height) // 2 background.alpha_composite(label_bg, (x_offset, label_y)) # Handle Y label if col_index == 0 and self.y_type != "None": label_bg = self.create_label(img, self.y_label[row_index], int(48 * img.height / 512), False) label_bg = label_bg.rotate(90, expand=True) label_x = (x_offset - label_bg.width) // 2 label_y = y_offset + (img.height - label_bg.height) // 2 background.alpha_composite(label_bg, (label_x, label_y)) x_offset += img.width + self.grid_spacing y_offset += img.height + self.grid_spacing return (sampler.pil2tensor(background), output_image) easyCache = easyLoader() sampler = easySampler() def check_link_to_clip(node_id, clip_id, visited=None, node=None): """Check if a given node links directly or indirectly to a loader node.""" if visited is None: visited = set() if node_id in visited: return False visited.add(node_id) if "pipe" in node["inputs"]: link_ids = node["inputs"]["pipe"] for id in link_ids: if id != 0 and id == str(clip_id): return True return False def find_nearest_steps(clip_id, prompt): """Find the nearest KSampler or preSampling node that references the given id.""" for id in prompt: node = prompt[id] if "Sampler" in node["class_type"] or "sampler" in node["class_type"] or "Sampling" in node["class_type"]: # Check if this KSampler node directly or indirectly references the given CLIPTextEncode node if check_link_to_clip(id, clip_id, None, node): steps = node["inputs"]["steps"] if "steps" in node["inputs"] else 1 return steps return 1 def find_wildcards_seed(text, prompt): if "__" in text: for i in prompt: if "wildcards" in prompt[i]['class_type'] and text == prompt[i]['inputs']['text']: return prompt[i]['inputs']['seed_num'] if "seed_num" in prompt[i]['inputs'] else None else: return None class easySave: def __init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False, output_dir=folder_paths.get_temp_directory()): self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None self.overwrite_existing = overwrite_existing self.my_unique_id = my_unique_id self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.type = 'temp' self.output_dir = output_dir if self.output_dir != folder_paths.get_temp_directory(): self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id) if not os.path.exists(self.output_dir): 
self._create_directory(self.output_dir) @staticmethod def _create_directory(folder: str): """Try to create the directory and log the status.""" log_node_warn("", f"Folder {folder} does not exist. Attempting to create...") if not os.path.exists(folder): try: os.makedirs(folder)
log_node_success("",f"{folder} Created Successfully")
6
2023-12-10 07:02:36+00:00
16k
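The record above flattens the easySampler helpers into single code fields; as a reading aid, here is a minimal, self-contained round-trip sketch of its tensor2pil / pil2tensor conversion. The tensor shape, the tolerance, and the round-trip assertions are assumptions added for illustration and are not part of the dataset row.

import numpy as np
import torch
from PIL import Image

def tensor2pil(image: torch.Tensor) -> Image.Image:
    # Scale [0, 1] floats to [0, 255] uint8 and drop the leading batch dimension.
    return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))

def pil2tensor(image: Image.Image) -> torch.Tensor:
    # Normalise uint8 pixels back to [0, 1] floats and re-add the batch dimension.
    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)

# Hypothetical round trip: a random 1x64x64x3 image tensor survives the conversion
# to within 1/255, the quantisation step of the uint8 intermediate.
original = torch.rand(1, 64, 64, 3)
restored = pil2tensor(tensor2pil(original))
assert restored.shape == original.shape
assert torch.allclose(original, restored, atol=1 / 255)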
AIFSH/NativeDancer
nativedancer/third_part/detectron2/data/build.py
[ { "identifier": "configurable", "path": "nativedancer/third_part/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\n \"\"\"\n Decorate a function or a class's __init__ method so that it can be called\n with a :class:`CfgNode` object using a :func:`from_config` function that translates\n :class:`CfgNode` to arguments.\n\n Examples:\n ::\n # Usage 1: Decorator on __init__:\n class A:\n @configurable\n def __init__(self, a, b=2, c=3):\n pass\n\n @classmethod\n def from_config(cls, cfg): # 'cfg' must be the first argument\n # Returns kwargs to be passed to __init__\n return {\"a\": cfg.A, \"b\": cfg.B}\n\n a1 = A(a=1, b=2) # regular construction\n a2 = A(cfg) # construct with a cfg\n a3 = A(cfg, b=3, c=4) # construct with extra overwrite\n\n # Usage 2: Decorator on any function. Needs an extra from_config argument:\n @configurable(from_config=lambda cfg: {\"a: cfg.A, \"b\": cfg.B})\n def a_func(a, b=2, c=3):\n pass\n\n a1 = a_func(a=1, b=2) # regular call\n a2 = a_func(cfg) # call with a cfg\n a3 = a_func(cfg, b=3, c=4) # call with extra overwrite\n\n Args:\n init_func (callable): a class's ``__init__`` method in usage 1. The\n class must have a ``from_config`` classmethod which takes `cfg` as\n the first argument.\n from_config (callable): the from_config function in usage 2. It must take `cfg`\n as its first argument.\n \"\"\"\n\n if init_func is not None:\n assert (\n inspect.isfunction(init_func)\n and from_config is None\n and init_func.__name__ == \"__init__\"\n ), \"Incorrect use of @configurable. Check API documentation for examples.\"\n\n @functools.wraps(init_func)\n def wrapped(self, *args, **kwargs):\n try:\n from_config_func = type(self).from_config\n except AttributeError as e:\n raise AttributeError(\n \"Class with @configurable must have a 'from_config' classmethod.\"\n ) from e\n if not inspect.ismethod(from_config_func):\n raise TypeError(\"Class with @configurable must have a 'from_config' classmethod.\")\n\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)\n init_func(self, **explicit_args)\n else:\n init_func(self, *args, **kwargs)\n\n return wrapped\n\n else:\n if from_config is None:\n return configurable # @configurable() is made equivalent to @configurable\n assert inspect.isfunction(\n from_config\n ), \"from_config argument of configurable must be a function!\"\n\n def wrapper(orig_func):\n @functools.wraps(orig_func)\n def wrapped(*args, **kwargs):\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config, *args, **kwargs)\n return orig_func(**explicit_args)\n else:\n return orig_func(*args, **kwargs)\n\n wrapped.from_config = from_config\n return wrapped\n\n return wrapper" }, { "identifier": "BoxMode", "path": "nativedancer/third_part/detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. 
They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr" }, { "identifier": "get_world_size", "path": "nativedancer/third_part/detectron2/utils/comm.py", "snippet": "def get_world_size() -> int:\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "seed_all_rng", "path": "nativedancer/third_part/detectron2/utils/env.py", "snippet": "def seed_all_rng(seed=None):\n \"\"\"\n Set the random seed for the RNG in torch, numpy and python.\n\n Args:\n seed (int): if None, will use a strong random seed.\n \"\"\"\n if seed is None:\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n logger = logging.getLogger(__name__)\n logger.info(\"Using a generated random seed {}\".format(seed))\n np.random.seed(seed)\n torch.manual_seed(seed)\n 
random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)" }, { "identifier": "PathManager", "path": "nativedancer/third_part/detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "_log_api_usage", "path": "nativedancer/third_part/detectron2/utils/logger.py", "snippet": "def _log_api_usage(identifier: str):\n \"\"\"\n Internal function used to log the usage of different detectron2 components\n inside facebook's infra.\n \"\"\"\n torch._C._log_api_usage_once(\"detectron2.\" + identifier)" }, { "identifier": "log_first_n", "path": "nativedancer/third_part/detectron2/utils/logger.py", "snippet": "def log_first_n(lvl, msg, n=1, *, name=None, key=\"caller\"):\n \"\"\"\n Log only for the first n times.\n\n Args:\n lvl (int): the logging level\n msg (str):\n n (int):\n name (str): name of the logger to use. Will use the caller's module by default.\n key (str or tuple[str]): the string(s) can be one of \"caller\" or\n \"message\", which defines how to identify duplicated logs.\n For example, if called with `n=1, key=\"caller\"`, this function\n will only log the first call from the same caller, regardless of\n the message content.\n If called with `n=1, key=\"message\"`, this function will log the\n same content only once, even if they are called from different places.\n If called with `n=1, key=(\"caller\", \"message\")`, this function\n will not log only if the same caller has logged the same message before.\n \"\"\"\n if isinstance(key, str):\n key = (key,)\n assert len(key) > 0\n\n caller_module, caller_key = _find_caller()\n hash_key = ()\n if \"caller\" in key:\n hash_key = hash_key + caller_key\n if \"message\" in key:\n hash_key = hash_key + (msg,)\n\n _LOG_COUNTER[hash_key] += 1\n if _LOG_COUNTER[hash_key] <= n:\n logging.getLogger(name or caller_module).log(lvl, msg)" }, { "identifier": "DatasetCatalog", "path": "nativedancer/third_part/detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" }, { "identifier": "AspectRatioGroupedDataset", "path": "nativedancer/third_part/detectron2/data/common.py", "snippet": "class AspectRatioGroupedDataset(data.IterableDataset):\n \"\"\"\n Batch data that have similar aspect ratio together.\n In this implementation, images whose aspect ratio < (or >) 1 will\n be batched together.\n This improves training speed because the images then need less padding\n to form a batch.\n\n It assumes the underlying dataset produces dicts with \"width\" and \"height\" keys.\n It will then produce a list of original dicts with length = batch_size,\n all with similar aspect ratios.\n \"\"\"\n\n def __init__(self, dataset, batch_size):\n \"\"\"\n Args:\n dataset: an 
iterable. Each element must be a dict with keys\n \"width\" and \"height\", which will be used to batch data.\n batch_size (int):\n \"\"\"\n self.dataset = dataset\n self.batch_size = batch_size\n self._buckets = [[] for _ in range(2)]\n # Hard-coded two aspect ratio groups: w > h and w < h.\n # Can add support for more aspect ratio groups, but doesn't seem useful\n\n def __iter__(self):\n for d in self.dataset:\n w, h = d[\"width\"], d[\"height\"]\n bucket_id = 0 if w > h else 1\n bucket = self._buckets[bucket_id]\n bucket.append(d)\n if len(bucket) == self.batch_size:\n data = bucket[:]\n # Clear bucket first, because code after yield is not\n # guaranteed to execute\n del bucket[:]\n yield data" }, { "identifier": "DatasetFromList", "path": "nativedancer/third_part/detectron2/data/common.py", "snippet": "class DatasetFromList(data.Dataset):\n \"\"\"\n Wrap a list to a torch Dataset. It produces elements of the list as data.\n \"\"\"\n\n def __init__(\n self,\n lst: list,\n copy: bool = True,\n serialize: Union[bool, Callable] = True,\n ):\n \"\"\"\n Args:\n lst (list): a list which contains elements to produce.\n copy (bool): whether to deepcopy the element when producing it,\n so that the result can be modified in place without affecting the\n source in the list.\n serialize (bool or callable): whether to serialize the stroage to other\n backend. If `True`, the default serialize method will be used, if given\n a callable, the callable will be used as serialize method.\n \"\"\"\n self._lst = lst\n self._copy = copy\n if not isinstance(serialize, (bool, Callable)):\n raise TypeError(f\"Unsupported type for argument `serailzie`: {serialize}\")\n self._serialize = serialize is not False\n\n if self._serialize:\n serialize_method = (\n serialize\n if isinstance(serialize, Callable)\n else _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD\n )\n logger.info(f\"Serializing the dataset using: {serialize_method}\")\n self._lst = serialize_method(self._lst)\n\n def __len__(self):\n return len(self._lst)\n\n def __getitem__(self, idx):\n if self._copy and not self._serialize:\n return copy.deepcopy(self._lst[idx])\n else:\n return self._lst[idx]" }, { "identifier": "MapDataset", "path": "nativedancer/third_part/detectron2/data/common.py", "snippet": "class MapDataset(data.Dataset):\n \"\"\"\n Map a function over the elements in a dataset.\n \"\"\"\n\n def __init__(self, dataset, map_func):\n \"\"\"\n Args:\n dataset: a dataset where map function is applied. Can be either\n map-style or iterable dataset. When given an iterable dataset,\n the returned object will also be an iterable dataset.\n map_func: a callable which maps the element in dataset. map_func can\n return None to skip the data (e.g. 
in case of errors).\n How None is handled depends on the style of `dataset`.\n If `dataset` is map-style, it randomly tries other elements.\n If `dataset` is iterable, it skips the data and tries the next.\n \"\"\"\n self._dataset = dataset\n self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work\n\n self._rng = random.Random(42)\n self._fallback_candidates = set(range(len(dataset)))\n\n def __new__(cls, dataset, map_func):\n is_iterable = isinstance(dataset, data.IterableDataset)\n if is_iterable:\n return _MapIterableDataset(dataset, map_func)\n else:\n return super().__new__(cls)\n\n def __getnewargs__(self):\n return self._dataset, self._map_func\n\n def __len__(self):\n return len(self._dataset)\n\n def __getitem__(self, idx):\n retry_count = 0\n cur_idx = int(idx)\n\n while True:\n data = self._map_func(self._dataset[cur_idx])\n if data is not None:\n self._fallback_candidates.add(cur_idx)\n return data\n\n # _map_func fails for this idx, use a random new index from the pool\n retry_count += 1\n self._fallback_candidates.discard(cur_idx)\n cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]\n\n if retry_count >= 3:\n logger = logging.getLogger(__name__)\n logger.warning(\n \"Failed to apply `_map_func` for idx: {}, retry count: {}\".format(\n idx, retry_count\n )\n )" }, { "identifier": "ToIterableDataset", "path": "nativedancer/third_part/detectron2/data/common.py", "snippet": "class ToIterableDataset(data.IterableDataset):\n \"\"\"\n Convert an old indices-based (also called map-style) dataset\n to an iterable-style dataset.\n \"\"\"\n\n def __init__(\n self,\n dataset: data.Dataset,\n sampler: Sampler,\n shard_sampler: bool = True,\n shard_chunk_size: int = 1,\n ):\n \"\"\"\n Args:\n dataset: an old-style dataset with ``__getitem__``\n sampler: a cheap iterable that produces indices to be applied on ``dataset``.\n shard_sampler: whether to shard the sampler based on the current pytorch data loader\n worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple\n workers, it is responsible for sharding its data based on worker id so that workers\n don't produce identical data.\n\n Most samplers (like our TrainingSampler) do not shard based on dataloader worker id\n and this argument should be set to True. But certain samplers may be already\n sharded, in that case this argument should be set to False.\n shard_chunk_size: when sharding the sampler, each worker will\n \"\"\"\n assert not isinstance(dataset, data.IterableDataset), dataset\n assert isinstance(sampler, Sampler), sampler\n self.dataset = dataset\n self.sampler = sampler\n self.shard_sampler = shard_sampler\n self.shard_chunk_size = shard_chunk_size\n\n def __iter__(self):\n if not self.shard_sampler:\n sampler = self.sampler\n else:\n # With map-style dataset, `DataLoader(dataset, sampler)` runs the\n # sampler in main process only. But `DataLoader(ToIterableDataset(dataset, sampler))`\n # will run sampler in every of the N worker. So we should only keep 1/N of the ids on\n # each worker. 
The assumption is that sampler is cheap to iterate so it's fine to\n # discard ids in workers.\n sampler = _shard_iterator_dataloader_worker(self.sampler, self.shard_chunk_size)\n for idx in sampler:\n yield self.dataset[idx]\n\n def __len__(self):\n return len(self.sampler)" }, { "identifier": "DatasetMapper", "path": "nativedancer/third_part/detectron2/data/dataset_mapper.py", "snippet": "class DatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by the model.\n\n This is the default callable to be used to map your dataset dict into training data.\n You may need to follow it to implement your own one for customized logic,\n such as a different way to read or transform images.\n See :doc:`/tutorials/data_loading` for details.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies cropping/geometric transforms to the image and annotations\n 3. Prepare data and annotations to Tensor and :class:`Instances`\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n use_instance_mask: bool = False,\n use_keypoint: bool = False,\n instance_mask_format: str = \"polygon\",\n keypoint_hflip_indices: Optional[np.ndarray] = None,\n precomputed_proposal_topk: Optional[int] = None,\n recompute_boxes: bool = False,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n use_instance_mask: whether to process instance segmentation annotations, if available\n use_keypoint: whether to process keypoint annotations if available\n instance_mask_format: one of \"polygon\" or \"bitmask\". 
Process instance segmentation\n masks into this format.\n keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`\n precomputed_proposal_topk: if given, will load pre-computed\n proposals from dataset_dict and keep the top k proposals for each image.\n recompute_boxes: whether to overwrite bounding box annotations\n by computing tight bounding boxes from instance mask annotations.\n \"\"\"\n if recompute_boxes:\n assert use_instance_mask, \"recompute_boxes requires instance masks\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = T.AugmentationList(augmentations)\n self.image_format = image_format\n self.use_instance_mask = use_instance_mask\n self.instance_mask_format = instance_mask_format\n self.use_keypoint = use_keypoint\n self.keypoint_hflip_indices = keypoint_hflip_indices\n self.proposal_topk = precomputed_proposal_topk\n self.recompute_boxes = recompute_boxes\n # fmt: on\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True):\n augs = utils.build_augmentation(cfg, is_train)\n if cfg.INPUT.CROP.ENABLED and is_train:\n augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))\n recompute_boxes = cfg.MODEL.MASK_ON\n else:\n recompute_boxes = False\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"use_instance_mask\": cfg.MODEL.MASK_ON,\n \"instance_mask_format\": cfg.INPUT.MASK_FORMAT,\n \"use_keypoint\": cfg.MODEL.KEYPOINT_ON,\n \"recompute_boxes\": recompute_boxes,\n }\n\n if cfg.MODEL.KEYPOINT_ON:\n ret[\"keypoint_hflip_indices\"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)\n\n if cfg.MODEL.LOAD_PROPOSALS:\n ret[\"precomputed_proposal_topk\"] = (\n cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN\n if is_train\n else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST\n )\n return ret\n\n def _transform_annotations(self, dataset_dict, transforms, image_shape):\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n if not self.use_instance_mask:\n anno.pop(\"segmentation\", None)\n if not self.use_keypoint:\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(\n obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices\n )\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n instances = utils.annotations_to_instances(\n annos, image_shape, mask_format=self.instance_mask_format\n )\n\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n if self.recompute_boxes:\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n dataset_dict[\"instances\"] = utils.filter_empty_instances(instances)\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n # USER: Write your own image loading if it's not from a file\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.image_format)\n utils.check_image_size(dataset_dict, image)\n\n # USER: Remove if you don't do semantic/panoptic segmentation.\n if \"sem_seg_file_name\" in dataset_dict:\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\"), \"L\").squeeze(2)\n else:\n sem_seg_gt = None\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n transforms = self.augmentations(aug_input)\n image, sem_seg_gt = aug_input.image, aug_input.sem_seg\n\n image_shape = image.shape[:2] # h, w\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n # USER: Remove if you don't use pre-computed proposals.\n # Most users would not need this feature.\n if self.proposal_topk is not None:\n utils.transform_proposals(\n dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk\n )\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n dataset_dict.pop(\"sem_seg_file_name\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n self._transform_annotations(dataset_dict, transforms, image_shape)\n\n return dataset_dict" }, { "identifier": "check_metadata_consistency", "path": "nativedancer/third_part/detectron2/data/detection_utils.py", "snippet": "def check_metadata_consistency(key, dataset_names):\n \"\"\"\n Check that the datasets have consistent metadata.\n\n Args:\n key (str): a metadata key\n dataset_names (list[str]): a list of dataset names\n\n Raises:\n AttributeError: if the key does not exist in the metadata\n ValueError: if the given datasets do not have the same metadata values defined by key\n \"\"\"\n if len(dataset_names) == 0:\n return\n logger = logging.getLogger(__name__)\n entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]\n for idx, entry in enumerate(entries_per_dataset):\n if entry != entries_per_dataset[0]:\n logger.error(\n \"Metadata '{}' for dataset '{}' is '{}'\".format(key, dataset_names[idx], str(entry))\n )\n logger.error(\n \"Metadata '{}' for dataset '{}' is '{}'\".format(\n key, dataset_names[0], str(entries_per_dataset[0])\n )\n )\n raise ValueError(\"Datasets have different metadata '{}'!\".format(key))" }, { "identifier": "InferenceSampler", "path": "nativedancer/third_part/detectron2/data/samplers/distributed_sampler.py", "snippet": "class InferenceSampler(Sampler):\n \"\"\"\n Produce indices for inference across all workers.\n Inference needs 
to run on the __exact__ set of samples,\n therefore when the total number of samples is not divisible by the number of workers,\n this sampler produces different number of samples on different workers.\n \"\"\"\n\n def __init__(self, size: int):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n \"\"\"\n self._size = size\n assert size > 0\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n self._local_indices = self._get_local_indices(size, self._world_size, self._rank)\n\n @staticmethod\n def _get_local_indices(total_size, world_size, rank):\n shard_size = total_size // world_size\n left = total_size % world_size\n shard_sizes = [shard_size + int(r < left) for r in range(world_size)]\n\n begin = sum(shard_sizes[:rank])\n end = min(sum(shard_sizes[: rank + 1]), total_size)\n return range(begin, end)\n\n def __iter__(self):\n yield from self._local_indices\n\n def __len__(self):\n return len(self._local_indices)" }, { "identifier": "RandomSubsetTrainingSampler", "path": "nativedancer/third_part/detectron2/data/samplers/distributed_sampler.py", "snippet": "class RandomSubsetTrainingSampler(TrainingSampler):\n \"\"\"\n Similar to TrainingSampler, but only sample a random subset of indices.\n This is useful when you want to estimate the accuracy vs data-number curves by\n training the model with different subset_ratio.\n \"\"\"\n\n def __init__(\n self,\n size: int,\n subset_ratio: float,\n shuffle: bool = True,\n seed_shuffle: Optional[int] = None,\n seed_subset: Optional[int] = None,\n ):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n subset_ratio (float): the ratio of subset data to sample from the underlying dataset\n shuffle (bool): whether to shuffle the indices or not\n seed_shuffle (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n seed_subset (int): the seed to randomize the subset to be sampled.\n Must be the same across all workers. 
If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n super().__init__(size=size, shuffle=shuffle, seed=seed_shuffle)\n\n assert 0.0 < subset_ratio <= 1.0\n self._size_subset = int(size * subset_ratio)\n assert self._size_subset > 0\n if seed_subset is None:\n seed_subset = comm.shared_random_seed()\n self._seed_subset = int(seed_subset)\n\n # randomly generate the subset indexes to be sampled from\n g = torch.Generator()\n g.manual_seed(self._seed_subset)\n indexes_randperm = torch.randperm(self._size, generator=g)\n self._indexes_subset = indexes_randperm[: self._size_subset]\n\n logger.info(\"Using RandomSubsetTrainingSampler......\")\n logger.info(f\"Randomly sample {self._size_subset} data from the original {self._size} data\")\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed) # self._seed equals seed_shuffle from __init__()\n while True:\n if self._shuffle:\n # generate a random permutation to shuffle self._indexes_subset\n randperm = torch.randperm(self._size_subset, generator=g)\n yield from self._indexes_subset[randperm].tolist()\n else:\n yield from self._indexes_subset.tolist()" }, { "identifier": "RepeatFactorTrainingSampler", "path": "nativedancer/third_part/detectron2/data/samplers/distributed_sampler.py", "snippet": "class RepeatFactorTrainingSampler(Sampler):\n \"\"\"\n Similar to TrainingSampler, but a sample may appear more times than others based\n on its \"repeat factor\". This is suitable for training on class imbalanced datasets like LVIS.\n \"\"\"\n\n def __init__(self, repeat_factors, *, shuffle=True, seed=None):\n \"\"\"\n Args:\n repeat_factors (Tensor): a float vector, the repeat factor for each indice. When it's\n full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.\n shuffle (bool): whether to shuffle the indices or not\n seed (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n self._shuffle = shuffle\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n # Split into whole number (_int_part) and fractional (_frac_part) parts.\n self._int_part = torch.trunc(repeat_factors)\n self._frac_part = repeat_factors - self._int_part\n\n @staticmethod\n def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):\n \"\"\"\n Compute (fractional) per-image repeat factors based on category frequency.\n The repeat factor for an image is a function of the frequency of the rarest\n category labeled in that image. The \"frequency of category c\" in [0, 1] is defined\n as the fraction of images in the training set (without repeats) in which category c\n appears.\n See :paper:`lvis` (>= v2) Appendix B.2.\n\n Args:\n dataset_dicts (list[dict]): annotations in Detectron2 dataset format.\n repeat_thresh (float): frequency threshold below which data is repeated.\n If the frequency is half of `repeat_thresh`, the image will be\n repeated twice.\n\n Returns:\n torch.Tensor:\n the i-th element is the repeat factor for the dataset image at index i.\n \"\"\"\n # 1. 
For each category c, compute the fraction of images that contain it: f(c)\n category_freq = defaultdict(int)\n for dataset_dict in dataset_dicts: # For each image (without repeats)\n cat_ids = {ann[\"category_id\"] for ann in dataset_dict[\"annotations\"]}\n for cat_id in cat_ids:\n category_freq[cat_id] += 1\n num_images = len(dataset_dicts)\n for k, v in category_freq.items():\n category_freq[k] = v / num_images\n\n # 2. For each category c, compute the category-level repeat factor:\n # r(c) = max(1, sqrt(t / f(c)))\n category_rep = {\n cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))\n for cat_id, cat_freq in category_freq.items()\n }\n\n # 3. For each image I, compute the image-level repeat factor:\n # r(I) = max_{c in I} r(c)\n rep_factors = []\n for dataset_dict in dataset_dicts:\n cat_ids = {ann[\"category_id\"] for ann in dataset_dict[\"annotations\"]}\n rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)\n rep_factors.append(rep_factor)\n\n return torch.tensor(rep_factors, dtype=torch.float32)\n\n def _get_epoch_indices(self, generator):\n \"\"\"\n Create a list of dataset indices (with repeats) to use for one epoch.\n\n Args:\n generator (torch.Generator): pseudo random number generator used for\n stochastic rounding.\n\n Returns:\n torch.Tensor: list of dataset indices to use in one epoch. Each index\n is repeated based on its calculated repeat factor.\n \"\"\"\n # Since repeat factors are fractional, we use stochastic rounding so\n # that the target repeat factor is achieved in expectation over the\n # course of training\n rands = torch.rand(len(self._frac_part), generator=generator)\n rep_factors = self._int_part + (rands < self._frac_part).float()\n # Construct a list of indices in which we repeat images as specified\n indices = []\n for dataset_index, rep_factor in enumerate(rep_factors):\n indices.extend([dataset_index] * int(rep_factor.item()))\n return torch.tensor(indices, dtype=torch.int64)\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed)\n while True:\n # Sample indices with repeats determined by stochastic rounding; each\n # \"epoch\" may have a slightly different size due to the rounding.\n indices = self._get_epoch_indices(g)\n if self._shuffle:\n randperm = torch.randperm(len(indices), generator=g)\n yield from indices[randperm].tolist()\n else:\n yield from indices.tolist()" }, { "identifier": "TrainingSampler", "path": "nativedancer/third_part/detectron2/data/samplers/distributed_sampler.py", "snippet": "class TrainingSampler(Sampler):\n \"\"\"\n In training, we only care about the \"infinite stream\" of training data.\n So this sampler produces an infinite stream of indices and\n all workers cooperate to correctly shuffle the indices and sample different indices.\n\n The samplers in each worker effectively produces `indices[worker_id::num_workers]`\n where `indices` is an infinite stream of indices consisting of\n `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)\n or `range(size) + range(size) + ...` (if shuffle is False)\n\n Note that this sampler does not shard based on pytorch DataLoader worker id.\n A sampler passed to pytorch DataLoader is used only with map-style dataset\n and will not be executed inside workers.\n But if this sampler is used in a way that it gets execute inside a dataloader\n worker, then extra work needs to be done to 
shard its outputs based on worker id.\n This is required so that workers don't produce identical data.\n :class:`ToIterableDataset` implements this logic.\n This note is true for all samplers in detectron2.\n \"\"\"\n\n def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n shuffle (bool): whether to shuffle the indices or not\n seed (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n if not isinstance(size, int):\n raise TypeError(f\"TrainingSampler(size=) expects an int. Got type {type(size)}.\")\n if size <= 0:\n raise ValueError(f\"TrainingSampler(size=) expects a positive int. Got {size}.\")\n self._size = size\n self._shuffle = shuffle\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed)\n while True:\n if self._shuffle:\n yield from torch.randperm(self._size, generator=g).tolist()\n else:\n yield from torch.arange(self._size).tolist()" } ]
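The RepeatFactorTrainingSampler snippet quoted in the context above defines the per-category repeat factor as r(c) = max(1, sqrt(t / f(c))), where f(c) is the fraction of images containing category c. Below is a small worked sketch of that rule on an invented four-image dataset; the category ids, the threshold, and the printed result are illustrative assumptions, not values from this record.

import math
from collections import defaultdict

dataset_dicts = [
    {"annotations": [{"category_id": 0}, {"category_id": 1}]},
    {"annotations": [{"category_id": 0}]},
    {"annotations": [{"category_id": 0}]},
    {"annotations": [{"category_id": 0}]},
]
repeat_thresh = 0.5  # categories seen in fewer than half the images get repeated

# Step 1: fraction of images containing each category: f(0) = 1.0, f(1) = 0.25.
category_freq = defaultdict(int)
for d in dataset_dicts:
    for cat_id in {a["category_id"] for a in d["annotations"]}:
        category_freq[cat_id] += 1
category_freq = {k: v / len(dataset_dicts) for k, v in category_freq.items()}

# Step 2: per-category repeat factor r(c) = max(1, sqrt(t / f(c))).
category_rep = {c: max(1.0, math.sqrt(repeat_thresh / f)) for c, f in category_freq.items()}

# Step 3: per-image factor is the max over the categories it contains.
image_rep = [max(category_rep[a["category_id"]] for a in d["annotations"]) for d in dataset_dicts]
print(image_rep)  # roughly [1.41, 1.0, 1.0, 1.0]: the image with the rare category is oversampled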
import itertools import logging import numpy as np import operator import pickle import torch import torch.utils.data as torchdata from collections import OrderedDict, defaultdict from typing import Any, Callable, Dict, List, Optional, Union from tabulate import tabulate from termcolor import colored from ..config import configurable from ..structures import BoxMode from ..utils.comm import get_world_size from ..utils.env import seed_all_rng from ..utils.file_io import PathManager from ..utils.logger import _log_api_usage, log_first_n from .catalog import DatasetCatalog, MetadataCatalog from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset from .dataset_mapper import DatasetMapper from .detection_utils import check_metadata_consistency from .samplers import ( InferenceSampler, RandomSubsetTrainingSampler, RepeatFactorTrainingSampler, TrainingSampler, )
token_num: 10,887
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains the default logic to build a dataloader for training or testing. """ __all__ = [ "build_batch_data_loader", "build_detection_train_loader", "build_detection_test_loader", "get_detection_dataset_dicts", "load_proposals_into_dataset", "print_instances_class_histogram", ] def filter_images_with_only_crowd_annotations(dataset_dicts): """ Filter out images with none annotations or only crowd annotations (i.e., images without non-crowd annotations). A common training-time preprocessing on COCO dataset. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format, but filtered. """ num_before = len(dataset_dicts) def valid(anns): for ann in anns: if ann.get("iscrowd", 0) == 0: return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with no usable annotations. {} images left.".format( num_before - num_after, num_after ) ) return dataset_dicts def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): """ Filter out images with too few number of keypoints. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format as dataset_dicts, but filtered. """ num_before = len(dataset_dicts) def visible_keypoints_in_image(dic): # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility annotations = dic["annotations"] return sum( (np.array(ann["keypoints"][2::3]) > 0).sum() for ann in annotations if "keypoints" in ann ) dataset_dicts = [ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image ] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with fewer than {} keypoints.".format( num_before - num_after, min_keypoints_per_image ) ) return dataset_dicts def load_proposals_into_dataset(dataset_dicts, proposal_file): """ Load precomputed object proposals into the dataset. The proposal file should be a pickled dict with the following keys: - "ids": list[int] or list[str], the image ids - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores corresponding to the boxes. - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file))
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains the default logic to build a dataloader for training or testing. """ __all__ = [ "build_batch_data_loader", "build_detection_train_loader", "build_detection_test_loader", "get_detection_dataset_dicts", "load_proposals_into_dataset", "print_instances_class_histogram", ] def filter_images_with_only_crowd_annotations(dataset_dicts): """ Filter out images with none annotations or only crowd annotations (i.e., images without non-crowd annotations). A common training-time preprocessing on COCO dataset. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format, but filtered. """ num_before = len(dataset_dicts) def valid(anns): for ann in anns: if ann.get("iscrowd", 0) == 0: return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with no usable annotations. {} images left.".format( num_before - num_after, num_after ) ) return dataset_dicts def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): """ Filter out images with too few number of keypoints. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format as dataset_dicts, but filtered. """ num_before = len(dataset_dicts) def visible_keypoints_in_image(dic): # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility annotations = dic["annotations"] return sum( (np.array(ann["keypoints"][2::3]) > 0).sum() for ann in annotations if "keypoints" in ann ) dataset_dicts = [ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image ] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with fewer than {} keypoints.".format( num_before - num_after, min_keypoints_per_image ) ) return dataset_dicts def load_proposals_into_dataset(dataset_dicts, proposal_file): """ Load precomputed object proposals into the dataset. The proposal file should be a pickled dict with the following keys: - "ids": list[int] or list[str], the image ids - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores corresponding to the boxes. - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file))
next_line: with PathManager.open(proposal_file, "rb") as f:
gold_snippet_index: 4
created_at: 2023-12-10 20:14:00+00:00
level: 16k
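Reading aid (not part of the dataset row above): the detectron2 record's context snippet spells out how RepeatFactorTrainingSampler turns category frequencies f(c) into per-image repeat factors r(I) = max_c max(1, sqrt(t / f(c))) and then applies stochastic rounding. The following is a minimal standalone Python sketch of that computation under assumed, illustrative names (toy `dataset_dicts` structure, `repeat_factors`, `rounded_repeats`); it is not the detectron2 API itself.

import math
import torch
from collections import defaultdict

def repeat_factors(dataset_dicts, repeat_thresh):
    # 1. f(c): fraction of images that contain category c
    category_freq = defaultdict(int)
    for record in dataset_dicts:
        for cat_id in {ann["category_id"] for ann in record["annotations"]}:
            category_freq[cat_id] += 1
    num_images = len(dataset_dicts)

    # 2. r(c) = max(1, sqrt(t / f(c))), with f(c) = freq / num_images
    category_rep = {
        cat_id: max(1.0, math.sqrt(repeat_thresh * num_images / freq))
        for cat_id, freq in category_freq.items()
    }

    # 3. r(I) = max over categories present in image I
    factors = []
    for record in dataset_dicts:
        cats = {ann["category_id"] for ann in record["annotations"]}
        factors.append(max((category_rep[c] for c in cats), default=1.0))
    return torch.tensor(factors, dtype=torch.float32)

def rounded_repeats(factors, generator=None):
    # Stochastic rounding: the fractional part becomes the probability of one
    # extra repeat, so the target factor is met in expectation over training.
    int_part = torch.trunc(factors)
    frac_part = factors - int_part
    rands = torch.rand(len(factors), generator=generator)
    return (int_part + (rands < frac_part).float()).long()

# Toy check: category 1 appears in only one of three images, so that image
# receives a repeat factor above 1 and is sampled more often.
toy = [
    {"annotations": [{"category_id": 0}]},
    {"annotations": [{"category_id": 0}, {"category_id": 1}]},
    {"annotations": [{"category_id": 0}]},
]
print(rounded_repeats(repeat_factors(toy, repeat_thresh=0.5)))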
repo_name: ethanweber/nerfiller
file_path: nerfiller/nerf/nerfiller_pipeline.py
[ { "identifier": "RGBInpainter", "path": "nerfiller/inpaint/rgb_inpainter.py", "snippet": "class RGBInpainter:\n \"\"\"\n Module for inpainting with the stable diffusion inpainting pipeline.\n \"\"\"\n\n def __init__(\n self,\n half_precision_weights: bool = True,\n lora_model_path: Optional[str] = None,\n device: str = \"cuda:0\",\n vae_device: str = \"cuda:0\",\n pipeline_name: str = \"stabilityai/stable-diffusion-2-inpainting\",\n ):\n print(f\"Loading RGB Inpainter ...\")\n\n self.half_precision_weights = half_precision_weights\n self.lora_model_path = lora_model_path\n self.device = device\n self.vae_device = vae_device\n self.dtype = torch.float16 if self.half_precision_weights else torch.float32\n self.pipeline_name = pipeline_name\n self.set_pipe()\n self.setup()\n\n def set_pipe(self):\n pipe_kwargs = {\n \"safety_checker\": None,\n \"feature_extractor\": None,\n \"requires_safety_checker\": False,\n \"torch_dtype\": self.dtype,\n }\n self.pipe = StableDiffusionInpaintPipeline.from_pretrained(\n self.pipeline_name,\n **pipe_kwargs,\n )\n\n def setup(self):\n # Load LoRA\n if self.lora_model_path:\n self.pipe.load_lora_weights(self.lora_model_path)\n print(f\"Loaded LoRA model from {self.lora_model_path}\")\n\n self.tokenizer = self.pipe.tokenizer\n self.text_encoder = self.pipe.text_encoder.to(self.device).eval()\n\n self.unet = self.pipe.unet.to(self.device).eval()\n self.vae = self.pipe.vae.to(self.vae_device).eval()\n\n self.vae_scale_factor = 2 ** (len(self.pipe.vae.config.block_out_channels) - 1)\n self.vae_latent_channels = self.pipe.vae.config.latent_channels\n\n # self.scheduler = DDPMScheduler.from_config(self.pipe.scheduler.config)\n self.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)\n self.num_train_timesteps = self.scheduler.num_train_timesteps\n self.alphas = self.scheduler.alphas_cumprod.to(self.device)\n\n del self.pipe\n cleanup()\n\n print(f\"Loaded RGB inpainter!\")\n\n def compute_text_embeddings(self, prompt: str, negative_prompt: str):\n \"\"\"Get the text embeddings for a string.\"\"\"\n assert self.tokenizer is not None\n assert self.text_encoder is not None\n with torch.no_grad():\n text_inputs = tokenize_prompt(self.tokenizer, prompt, tokenizer_max_length=None)\n prompt_embeds = encode_prompt(\n self.text_encoder,\n text_inputs.input_ids,\n text_inputs.attention_mask,\n text_encoder_use_attention_mask=False,\n )\n negative_text_inputs = tokenize_prompt(self.tokenizer, negative_prompt, tokenizer_max_length=None)\n negative_prompt_embeds = encode_prompt(\n self.text_encoder,\n negative_text_inputs.input_ids,\n negative_text_inputs.attention_mask,\n text_encoder_use_attention_mask=False,\n )\n\n return [prompt_embeds, negative_prompt_embeds]\n\n def destroy_text_encoder(self) -> None:\n \"\"\"Delete the text modules to save on memory.\"\"\"\n del self.tokenizer\n del self.text_encoder\n cleanup()\n\n def forward_unet(\n self,\n sample,\n t,\n text_embeddings,\n denoise_in_grid: bool = False,\n ):\n # process embeddings\n prompt_embeds, negative_prompt_embeds = text_embeddings\n\n batch_size = sample.shape[0] // 3\n\n prompt_embeds = torch.cat(\n [\n prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n ]\n )\n\n if denoise_in_grid:\n grid_sample = make_grid(sample)\n grid_prompt_embeds = prompt_embeds[:3].repeat(grid_sample.shape[0] // 3, 1, 1)\n noise_pred = self.unet(\n sample=grid_sample,\n timestep=t,\n encoder_hidden_states=grid_prompt_embeds,\n 
return_dict=False,\n )[0]\n noise_pred = undo_grid(noise_pred)\n else:\n noise_pred = self.unet(\n sample=sample,\n timestep=t,\n encoder_hidden_states=prompt_embeds,\n return_dict=False,\n )[0]\n return noise_pred\n\n def get_noise_pred(\n self,\n t,\n model_input: ModelInput,\n text_embeddings,\n text_guidance_scale: float = 0.0,\n image_guidance_scale: float = 0.0,\n denoise_in_grid: bool = False,\n multidiffusion_steps: int = 1,\n multidiffusion_type: str = \"epsilon\",\n randomize_latents: bool = False,\n randomize_within_grid: bool = False,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n only_noise_pred: bool = False,\n ):\n assert self.scheduler.config.prediction_type == \"epsilon\", \"We assume the model predicts epsilon.\"\n\n batch_size = model_input.latents.shape[0]\n value = torch.zeros_like(model_input.latents)\n count = torch.zeros_like(model_input.latents)\n\n for i in range(multidiffusion_steps):\n if randomize_latents:\n indices = torch.randperm(batch_size)\n else:\n indices = torch.arange(batch_size)\n\n if denoise_in_grid and randomize_within_grid:\n for j in range(0, len(indices), 4):\n indices[j : j + 4] = indices[j : j + 4][torch.randperm(4)]\n\n latents = model_input.latents[indices]\n latents_mask = model_input.latents_mask[indices]\n latents_mask_uncond = model_input.latents_mask_uncond[indices]\n masked_image_latents = model_input.masked_image_latents[indices]\n masked_image_latents_uncond = model_input.masked_image_latents_uncond[indices]\n\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents, latents, latents])\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n latents_mask_input = torch.cat([latents_mask, latents_mask, latents_mask_uncond])\n masked_image_latents_input = torch.cat(\n [\n masked_image_latents,\n masked_image_latents,\n masked_image_latents_uncond,\n ]\n )\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input_cat = torch.cat(\n [latent_model_input, latents_mask_input, masked_image_latents_input],\n dim=1,\n )\n\n # TODO: save compute by skipping some text encodings if not using them in CFG\n\n noise_pred_all = self.forward_unet(\n sample=latent_model_input_cat,\n t=t,\n text_embeddings=text_embeddings,\n denoise_in_grid=denoise_in_grid,\n )\n\n noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred_all.chunk(3)\n\n noise_pred = (\n noise_pred_image\n + text_guidance_scale * (noise_pred_text - noise_pred_image)\n + image_guidance_scale * (noise_pred_image - noise_pred_uncond)\n )\n\n if multidiffusion_type == \"v_prediction\":\n v_prediction = get_v_prediction_from_epsilon(noise_pred, t, latents, self.scheduler.alphas_cumprod)\n value[indices] += v_prediction\n count[indices] += 1\n elif multidiffusion_type == \"epsilon\":\n value[indices] += noise_pred\n count[indices] += 1\n else:\n raise ValueError(\"Not implemented.\")\n\n # take the MultiDiffusion step\n final_noise_pred = torch.where(count > 0, value / count, value)\n\n if multidiffusion_type == \"v_prediction\":\n final_noise_pred = get_epsilon_from_v_prediction(\n final_noise_pred,\n t.item(),\n model_input.latents,\n self.scheduler.alphas_cumprod,\n )\n elif multidiffusion_type == \"epsilon\":\n pass\n else:\n raise ValueError(\"Not implemented.\")\n\n if only_noise_pred:\n return None, None, final_noise_pred\n\n scheduler_output = self.scheduler.step(final_noise_pred, t, model_input.latents, generator=generator)\n 
pred_prev_sample = scheduler_output.prev_sample\n pred_original_sample = scheduler_output.pred_original_sample\n\n assert not pred_prev_sample.isnan().any()\n assert not pred_original_sample.isnan().any()\n return pred_prev_sample, pred_original_sample, final_noise_pred\n\n def get_model_input(\n self,\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n starting_timestep: Optional[int] = None,\n keep_grad: bool = False,\n ) -> ModelInput:\n \"\"\"Returns the inputs for the unet.\"\"\"\n\n # TODO: incorporate seeds\n\n batch_size, _, height, width = image.shape\n\n noise = randn_tensor(\n shape=(\n batch_size,\n self.vae_latent_channels,\n height // self.vae_scale_factor,\n width // self.vae_scale_factor,\n ),\n generator=generator,\n device=torch.device(self.device),\n dtype=self.dtype,\n )\n if starting_image is not None:\n assert starting_timestep is not None\n if keep_grad:\n latents = self.encode_images(starting_image)\n else:\n with torch.no_grad():\n latents = self.encode_images(starting_image)\n latents = self.scheduler.add_noise(latents, noise, starting_timestep)\n else:\n latents = noise\n\n latents_mask = torch.nn.functional.interpolate(\n mask,\n size=(height // self.vae_scale_factor, width // self.vae_scale_factor),\n mode=\"nearest\",\n )\n assert len(torch.unique(latents_mask)) <= 2\n latents_mask = latents_mask.to(device=self.device, dtype=self.dtype)\n assert len(torch.unique(mask)) <= 2\n masked_image = torch.where(mask == 0, image, 0.5)\n with torch.no_grad():\n masked_image_latents = self.encode_images(masked_image)\n\n latents_mask_uncond = torch.ones_like(latents_mask)\n masked_image_uncond = torch.ones_like(masked_image) * 0.5\n with torch.no_grad():\n masked_image_latents_uncond = self.encode_images(masked_image_uncond)\n\n model_input = ModelInput(\n latents.to(device=self.device, dtype=self.dtype),\n latents_mask.to(device=self.device, dtype=self.dtype),\n masked_image_latents.to(device=self.device, dtype=self.dtype),\n latents_mask_uncond.to(device=self.device, dtype=self.dtype),\n masked_image_latents_uncond.to(device=self.device, dtype=self.dtype),\n noise.to(device=self.device, dtype=self.dtype),\n )\n\n return model_input\n\n def get_loss(\n self,\n x0: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n depth: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n multiview_guidance_scale: float = 0.0,\n reconstruction_guidance_scale: float = 0.0,\n feature_extractor: Optional[FeatureExtractor] = None,\n multiview_metric: Optional[MultiviewMetric] = None,\n K: Optional[Float[Tensor, \"B 3 3\"]] = None,\n c2w: Optional[Float[Tensor, \"B 3 4\"]] = None,\n output_folder: Optional[Path] = None,\n step: int = 0,\n guidance_step: int = 0,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n ):\n \"\"\"Losses on the VAE decoded images x0.\n The multi-view loss is applied where mask == 0.0 (regions that have known depth).\n \"\"\"\n\n loss = 0.0\n\n if multiview_guidance_scale != 0.0:\n features = feature_extractor(x0.to(feature_extractor.device)).to(self.device)\n\n # multiview guidance\n scale_factor = features.shape[-1] / x0.shape[-1]\n K_scaled = rescale_intrinsics(K, scale_factor, scale_factor)\n mask_scaled = 1.0 - torch.nn.functional.interpolate(mask, scale_factor=scale_factor, mode=\"nearest\")\n depth_scaled = torch.nn.functional.interpolate(depth, scale_factor=scale_factor, 
mode=\"bilinear\")\n for cam1 in range(len(c2w)):\n for cam2 in range(cam1 + 1, len(c2w)):\n loss_mv, loss_dict = multiview_metric(\n features1=features[cam1 : cam1 + 1],\n features2=features[cam2 : cam2 + 1],\n K1=K_scaled[cam1 : cam1 + 1],\n K2=K_scaled[cam2 : cam2 + 1],\n c2w1=c2w[cam1 : cam1 + 1],\n c2w2=c2w[cam2 : cam2 + 1],\n image1=x0[cam1 : cam1 + 1],\n image2=x0[cam2 : cam2 + 1],\n mask1=mask_scaled[cam1 : cam1 + 1],\n mask2=mask_scaled[cam2 : cam2 + 1],\n depth1=depth_scaled[cam1 : cam1 + 1],\n depth2=depth_scaled[cam2 : cam2 + 1],\n output_folder=output_folder if (cam1 == 0 and guidance_step == 0) else None,\n suffix=f\"-{step:06d}-{cam1:06d}-{cam2:06d}-{guidance_step:06d}\",\n )\n loss += multiview_guidance_scale * loss_mv.sum()\n\n if reconstruction_guidance_scale != 0.0:\n loss += (\n reconstruction_guidance_scale * (((starting_image.to(x0.device) - x0) * mask.to(x0.device)) ** 2).mean()\n )\n\n return loss\n\n @torch.cuda.amp.autocast(enabled=True)\n def get_image(\n self,\n text_embeddings,\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n num_inference_steps: int = 20,\n denoise_in_grid: bool = False,\n depth: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n text_guidance_scale: Optional[float] = None,\n image_guidance_scale: Optional[float] = None,\n multidiffusion_steps: int = 1,\n multidiffusion_type: str = \"epsilon\",\n randomize_latents: bool = False,\n randomize_within_grid: bool = False,\n use_decoder_approximation: bool = False,\n multiview_guidance_scale: float = 0.0,\n reconstruction_guidance_scale: float = 0.0,\n feature_extractor: Optional[FeatureExtractor] = None,\n multiview_metric: Optional[MultiviewMetric] = None,\n K: Optional[Float[Tensor, \"B 3 3\"]] = None,\n c2w: Optional[Float[Tensor, \"B 3 4\"]] = None,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n show_multiview: bool = False,\n guidance_steps: List[int] = [5],\n num_guidance_steps: int = 10,\n classifier_guidance_scale: float = 0.0,\n output_folder: Optional[Path] = None,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n starting_lower_bound: Optional[float] = None,\n starting_upper_bound: Optional[float] = None,\n classifier_guidance_loss_rescale=1000.0,\n classifier_guidance_start_step: int = 0,\n replace_original_pixels: bool = False,\n ) -> Float[Tensor, \"B 3 H W\"]:\n \"\"\"Run the denoising sampling process, also known as the reverse process.\n Inpaint where mask == 1.\n If output folder is not None, then save images to this folder.\n\n Args:\n text_embeddings: Either 2 per image (BB) or 2 total, which will use the same cond. and uncond. 
prompts for all.\n loss_rescale: To prevent fp16 underflow\n \"\"\"\n\n if output_folder:\n output_folder.mkdir(parents=True, exist_ok=True)\n\n batch_size, _, height, width = image.shape\n\n if starting_lower_bound is not None:\n min_step = int(self.num_train_timesteps * starting_lower_bound)\n max_step = int(self.num_train_timesteps * starting_upper_bound)\n # select t, set multi-step diffusion\n T = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device)\n self.scheduler.config.num_train_timesteps = T.item()\n else:\n self.scheduler.config.num_train_timesteps = self.num_train_timesteps\n\n self.scheduler.set_timesteps(num_inference_steps, device=self.device)\n\n model_input = self.get_model_input(\n image=image,\n mask=mask,\n generator=generator,\n # self.scheduler.config.num_train_timesteps == 1000 is equivalent to starting_lower_bound and starting_upper_bound both being 1\n # so start with full noise by setting this to None\n starting_image=starting_image if self.scheduler.config.num_train_timesteps != 1000 else None,\n starting_timestep=self.scheduler.timesteps[0],\n )\n\n if depth is None:\n depth = torch.zeros_like(mask)\n\n progress = Progress(\n TextColumn(\"[progress.description]{task.description}\"),\n BarColumn(),\n TaskProgressColumn(),\n TimeElapsedColumn(),\n )\n task1 = progress.add_task(\n f\"[green]Inpainting batch of images...\",\n total=len(self.scheduler.timesteps),\n )\n\n with progress:\n for i, t in enumerate(self.scheduler.timesteps):\n start_time = time.time()\n\n # DragDiffusion style guidance (\"drag\")\n use_drag_guidance = (\n multiview_guidance_scale != 0.0 or reconstruction_guidance_scale != 0.0\n ) and i in guidance_steps\n if use_drag_guidance:\n model_input.latents = model_input.latents.to(torch.float32).detach().requires_grad_(True)\n scaler = torch.cuda.amp.GradScaler()\n optimizer = torch.optim.Adam([model_input.latents], lr=1e-2)\n for guidance_step in range(num_guidance_steps):\n _, pred_original_sample, _ = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n denoise_in_grid=denoise_in_grid,\n multidiffusion_steps=1,\n multidiffusion_type=multidiffusion_type,\n randomize_latents=randomize_latents,\n randomize_within_grid=randomize_within_grid,\n )\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n if output_folder:\n image_x0 = torch.cat(list(x0.permute(0, 2, 3, 1)), dim=1).detach().cpu()\n mediapy.write_image(\n output_folder / f\"x0-{i:06d}-{guidance_step:06d}.png\",\n image_x0,\n )\n\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=output_folder / \"drag_guidance\",\n step=i,\n guidance_step=guidance_step,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/drag_guidance_loss-{i}\": loss})\n\n optimizer.zero_grad()\n assert not loss.isnan().any()\n scaler.scale(loss).backward()\n\n assert not model_input.latents.grad.isnan().any()\n # print(\n # model_input.latents.grad.abs().mean(),\n # (model_input.latents.grad == 0.0).sum() / model_input.latents.grad.numel(),\n # )\n\n 
scaler.step(optimizer)\n assert not model_input.latents.isnan().any()\n assert not depth.isnan().any()\n scaler.update()\n\n # take a step\n use_classifier_guidance = classifier_guidance_scale != 0.0 and i >= classifier_guidance_start_step\n model_input.latents = (\n model_input.latents.to(self.dtype).detach().requires_grad_(use_classifier_guidance)\n )\n with torch.enable_grad() if use_classifier_guidance else torch.no_grad():\n _, pred_original_sample, noise_pred = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n denoise_in_grid=denoise_in_grid,\n multidiffusion_steps=multidiffusion_steps,\n multidiffusion_type=multidiffusion_type,\n randomize_latents=randomize_latents,\n randomize_within_grid=randomize_within_grid,\n )\n\n # classifier guidance (\"classifier\")\n if use_classifier_guidance:\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=output_folder / \"classifier_guidance\",\n step=i,\n guidance_step=0,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/classifier_guidance_loss\": loss})\n\n grad = (\n torch.autograd.grad(\n classifier_guidance_loss_rescale * loss,\n model_input.latents,\n )[0]\n / classifier_guidance_loss_rescale\n )\n # print(\n # grad.abs().mean(),\n # (grad == 0.0).sum() / grad.numel(),\n # )\n noise_pred = noise_pred + classifier_guidance_scale * grad\n\n model_input.latents = model_input.latents.detach().requires_grad_(False)\n scheduler_output = self.scheduler.step(noise_pred, t, model_input.latents, generator=generator)\n model_input.latents = scheduler_output.prev_sample\n\n if output_folder:\n # save the denoised x0\n with torch.no_grad():\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n if use_drag_guidance or use_classifier_guidance:\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=None,\n step=i,\n guidance_step=0,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/loss\": loss})\n\n image_x0 = torch.cat(list(x0.permute(0, 2, 3, 1)), dim=1).detach().cpu()\n mediapy.write_image(output_folder / \"x0.png\", image_x0)\n mediapy.write_image(output_folder / f\"x0-{i:06d}.png\", image_x0)\n\n progress.update(task1, advance=1)\n end_time = time.time()\n # print(f\"[green]Time for iter {i}:\", end_time - start_time)\n\n if output_folder:\n output_filename = str(output_folder) + \".mp4\"\n CONSOLE.print(f\"[green]Saving video to {output_filename}\")\n save_video_from_path(\n path=output_folder,\n glob_str=\"x0*png\",\n sec=10,\n output_filename=output_filename,\n )\n\n with torch.no_grad():\n x0 = self.decode_latents(\n 
model_input.latents.detach(),\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n return x0\n\n def encode_images(self, imgs: Float[Tensor, \"B 3 512 512\"]) -> Float[Tensor, \"B 4 64 64\"]:\n imgs = imgs * 2.0 - 1.0\n sampled_posterior = self.vae.encode(imgs.to(self.vae_device), return_dict=False)[0].sample().to(self.device)\n latents = sampled_posterior * 0.18215\n return latents\n\n def decode_latents(\n self,\n latents: Float[Tensor, \"B 4 H W\"],\n use_decoder_approximation: bool = False,\n ) -> Float[Tensor, \"B 3 Hout Wout\"]:\n if use_decoder_approximation:\n da = get_decoder_approximation().to(latents)\n x = torch.nn.functional.interpolate(latents, scale_factor=self.vae_scale_factor, mode=\"bilinear\")\n x = torch.matmul(x.permute(0, 2, 3, 1), da).permute(0, 3, 1, 2)\n return x\n else:\n scaled_latents = 1 / 0.18215 * latents\n image = self.vae.decode(scaled_latents.to(self.vae_device), return_dict=False)[0].to(self.device)\n image = (image * 0.5 + 0.5).clamp(0, 1)\n return image\n\n def sds_loss(\n self,\n text_embeddings: Union[Float[Tensor, \"BB 77 768\"], Float[Tensor, \"2 77 768\"]],\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n starting_image: Float[Tensor, \"B 3 H W\"],\n text_guidance_scale: Optional[float] = None,\n image_guidance_scale: Optional[float] = None,\n starting_lower_bound: float = 0.02,\n starting_upper_bound: float = 0.98,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n ) -> torch.Tensor:\n \"\"\"Score Distilation Sampling loss proposed in DreamFusion paper (https://dreamfusion3d.github.io/)\n Args:\n text_embeddings: Text embeddings\n image: Rendered image\n mask: Mask, inpaint where 1\n text_guidance_scale: How much to weigh the guidance\n image_guidance_scale: How much to weigh the guidance\n Returns:\n The loss\n \"\"\"\n\n # NOTE: doesn't work for gridding right now\n\n batch_size, _, height, width = image.shape\n\n min_step = int(self.num_train_timesteps * starting_lower_bound)\n max_step = int(self.num_train_timesteps * starting_upper_bound)\n\n t = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device)\n\n model_input = self.get_model_input(\n image=image,\n mask=mask,\n generator=generator,\n starting_image=starting_image,\n starting_timestep=t,\n keep_grad=True,\n )\n\n # predict the noise residual with unet, NO grad!\n with torch.no_grad():\n _, _, noise_pred = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n only_noise_pred=True,\n )\n\n # w(t), sigma_t^2\n w = 1 - self.alphas[t]\n\n grad = w * (noise_pred - model_input.noise)\n grad = torch.nan_to_num(grad)\n\n target = (model_input.latents - grad).detach()\n loss = (\n 0.5\n * torch.nn.functional.mse_loss(model_input.latents, target, reduction=\"sum\")\n / model_input.latents.shape[0]\n )\n\n return loss" }, { "identifier": "RGBInpainterXL", "path": "nerfiller/inpaint/rgb_inpainter.py", "snippet": "class RGBInpainterXL(RGBInpainter):\n def set_pipe(self):\n pipe_kwargs = {\n \"safety_checker\": None,\n \"feature_extractor\": None,\n \"requires_safety_checker\": False,\n \"torch_dtype\": self.dtype,\n }\n self.pipe = AutoPipelineForInpainting.from_pretrained(\n \"diffusers/stable-diffusion-xl-1.0-inpainting-0.1\",\n **pipe_kwargs,\n )\n\n def setup(self):\n # Load LoRA\n if self.lora_model_path:\n 
self.pipe.load_lora_weights(self.lora_model_path)\n print(f\"Loaded LoRA model from {self.lora_model_path}\")\n\n # self.tokenizer = self.pipe.tokenizer\n # self.text_encoder = self.pipe.text_encoder.to(self.device).eval()\n self.pipe.to(self.device)\n\n self.unet = self.pipe.unet.to(self.device).eval()\n self.vae = self.pipe.vae.to(self.vae_device).eval()\n\n self.vae_scale_factor = 2 ** (len(self.pipe.vae.config.block_out_channels) - 1)\n self.vae_latent_channels = self.pipe.vae.config.latent_channels\n\n # self.scheduler = DDPMScheduler.from_config(self.pipe.scheduler.config)\n self.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)\n self.num_train_timesteps = self.scheduler.num_train_timesteps\n self.alphas = self.scheduler.alphas_cumprod.to(self.device)\n\n # save this in order to delete the pipeline after text encoding\n self.text_encoder_2_config_projection_dim = self.pipe.text_encoder_2.config.projection_dim\n\n print(f\"Loaded RGB inpainter!\")\n\n def compute_text_embeddings(self, prompt: str, negative_prompt: str):\n \"\"\"Get the text embeddings for a string.\"\"\"\n assert self.pipe is not None\n\n (\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ) = self.pipe.encode_prompt(prompt, negative_prompt, device=self.device)\n return [\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ]\n\n def remove_pipe(self):\n del self.pipe\n cleanup()\n\n # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids\n def _get_add_time_ids(\n self,\n original_size,\n crops_coords_top_left,\n target_size,\n aesthetic_score,\n negative_aesthetic_score,\n negative_original_size,\n negative_crops_coords_top_left,\n negative_target_size,\n dtype,\n requires_aesthetics_score=False,\n ):\n if requires_aesthetics_score:\n add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))\n add_neg_time_ids = list(\n negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)\n )\n else:\n add_time_ids = list(original_size + crops_coords_top_left + target_size)\n add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)\n\n passed_add_embed_dim = (\n self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2_config_projection_dim\n )\n expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features\n\n if (\n expected_add_embed_dim > passed_add_embed_dim\n and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim\n ):\n raise ValueError(\n f\"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model.\"\n )\n elif (\n expected_add_embed_dim < passed_add_embed_dim\n and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim\n ):\n raise ValueError(\n f\"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. 
Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model.\"\n )\n elif expected_add_embed_dim != passed_add_embed_dim:\n raise ValueError(\n f\"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.\"\n )\n\n add_time_ids = torch.tensor([add_time_ids], dtype=dtype)\n add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)\n\n return add_time_ids, add_neg_time_ids\n\n def forward_unet(\n self,\n sample,\n t,\n text_embeddings,\n denoise_in_grid: bool = False,\n ):\n # process embeddings\n (\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ) = text_embeddings\n\n batch_size = sample.shape[0] // 3\n\n # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n height, width = sample.shape[-2:]\n height = height * self.vae_scale_factor\n width = width * self.vae_scale_factor\n\n original_size = (height, width)\n target_size = (height, width)\n\n crops_coords_top_left = (0, 0)\n aesthetic_score = 6.0\n negative_aesthetic_score = 2.5\n negative_crops_coords_top_left = (0, 0)\n\n negative_original_size = original_size\n negative_target_size = target_size\n\n add_time_ids, add_neg_time_ids = self._get_add_time_ids(\n original_size,\n crops_coords_top_left,\n target_size,\n aesthetic_score,\n negative_aesthetic_score,\n negative_original_size,\n negative_crops_coords_top_left,\n negative_target_size,\n dtype=self.dtype,\n )\n\n prompt_embeds = torch.cat(\n [\n prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n ],\n dim=0,\n )\n add_text_embeds = torch.cat(\n [\n pooled_prompt_embeds.repeat(batch_size, 1),\n negative_pooled_prompt_embeds.repeat(batch_size, 1),\n negative_pooled_prompt_embeds.repeat(batch_size, 1),\n ],\n dim=0,\n )\n add_time_ids = torch.cat(\n [\n add_time_ids.repeat(batch_size, 1),\n add_neg_time_ids.repeat(batch_size, 1),\n add_neg_time_ids.repeat(batch_size, 1),\n ],\n dim=0,\n )\n\n prompt_embeds = prompt_embeds.to(self.device)\n add_text_embeds = add_text_embeds.to(self.device)\n add_time_ids = add_time_ids.to(self.device)\n\n if denoise_in_grid:\n grid_sample = make_grid(sample)\n grid_prompt_embeds = prompt_embeds[:3].repeat(grid_sample.shape[0] // 3, 1, 1)\n grid_add_text_embeds = add_text_embeds[:3].repeat(grid_sample.shape[0] // 3, 1)\n grid_add_time_ids = add_time_ids[:3].repeat(grid_sample.shape[0] // 3, 1)\n added_cond_kwargs = {\n \"text_embeds\": grid_add_text_embeds,\n \"time_ids\": grid_add_time_ids,\n }\n noise_pred = self.unet(\n sample=grid_sample,\n timestep=t,\n encoder_hidden_states=grid_prompt_embeds,\n added_cond_kwargs=added_cond_kwargs,\n return_dict=False,\n )[0]\n noise_pred = undo_grid(noise_pred)\n else:\n added_cond_kwargs = {\n \"text_embeds\": add_text_embeds,\n \"time_ids\": add_time_ids,\n }\n noise_pred = self.unet(\n sample=sample,\n timestep=t,\n encoder_hidden_states=prompt_embeds,\n added_cond_kwargs=added_cond_kwargs,\n return_dict=False,\n )[0]\n return noise_pred" }, { "identifier": "DepthInpainter", "path": "nerfiller/inpaint/depth_inpainter.py", "snippet": "class DepthInpainter:\n def __init__(\n self,\n max_depth: 
float = 10.0,\n tileX: bool = True,\n tileY: bool = False,\n depth_method: str = \"zoedepth\",\n device: str = \"cuda:0\",\n ):\n self.max_depth = max_depth\n self.tileX = tileX\n self.tileY = tileY\n self.depth_method = depth_method\n self.device = device\n self.configure()\n\n def configure(self) -> None:\n logging.info(f\"Loading depth guidance ...\")\n\n # setup the depth network\n\n # zoedepth\n if self.depth_method == \"zoedepth\":\n repo = \"isl-org/ZoeDepth\"\n self.zoe = torch.compile(torch.hub.load(repo, \"ZoeD_NK\", pretrained=True).to(self.device))\n\n # TODO: midas\n\n if self.depth_method == \"midas\":\n model_type = \"DPT_Large\"\n self.midas = torch.hub.load(\"intel-isl/MiDaS\", model_type).to(self.device)\n self.midas.eval()\n midas_transforms = torch.hub.load(\"intel-isl/MiDaS\", \"transforms\")\n if model_type == \"DPT_Large\" or model_type == \"DPT_Hybrid\":\n self.transform = midas_transforms.dpt_transform\n else:\n self.transform = midas_transforms.small_transform\n\n def get_depth(\n self,\n image: Float[Tensor, \"B C H W\"],\n rendered_depth: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n overlapping_region_mask: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n max_depth: Optional[float] = None,\n use_inverse=False,\n fov_x: Optional[float] = None,\n fov_y: Optional[float] = None,\n ):\n assert fov_x == fov_y\n batch_size, _, height, width = image.shape\n if self.depth_method != \"zoedepth\":\n assert batch_size == 1\n\n if self.depth_method == \"zoedepth\":\n predicted_depth = self.zoe.infer(image)\n elif self.depth_method == \"midas\":\n predicted_disparity = self.midas(image * 2 - 1).unsqueeze(1)\n predicted_depth = torch.where(predicted_disparity < 3, 0.0, 1 / predicted_disparity)\n else:\n raise NotImplementedError()\n\n if max_depth:\n predicted_depth[predicted_depth > max_depth] = 0.0\n\n return predicted_depth\n\n def get_distance(\n self,\n image: Float[Tensor, \"B C H W\"],\n fov_x: float,\n fov_y: float,\n rendered_distance: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n overlapping_region_mask: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n max_distance: Optional[float] = None,\n use_inverse=False,\n ):\n rendered_depth = distance_to_depth(rendered_distance, fov_x, fov_y)\n depth = self.get_depth(\n image,\n rendered_depth=rendered_depth,\n overlapping_region_mask=overlapping_region_mask,\n max_depth=max_distance,\n use_inverse=use_inverse,\n fov_x=fov_x,\n fov_y=fov_y,\n )\n distance = depth_to_distance(depth, fov_x, fov_y)\n\n if max_distance:\n distance[distance > max_distance] = 0.0\n overlapping_region_mask[distance > max_distance] = 0.0\n\n return distance" }, { "identifier": "Upscaler", "path": "nerfiller/inpaint/upscaler.py", "snippet": "class Upscaler:\n def __init__(\n self,\n device: str = \"cuda:0\",\n ):\n self.device = device\n # load model and scheduler\n model_id = \"stabilityai/stable-diffusion-x4-upscaler\"\n self.pipeline = StableDiffusionUpscalePipeline.from_pretrained(\n model_id, revision=\"fp16\", torch_dtype=torch.float16\n )\n self.pipeline = self.pipeline.to(self.device)\n self.pipeline.scheduler = DDIMScheduler.from_config(self.pipeline.scheduler.config)\n\n @torch.cuda.amp.autocast(enabled=False)\n def upsample(\n self,\n image: Float[Tensor, \"B 3 H W\"],\n num_inference_steps: int = 20,\n noise_level: int = 20,\n ):\n batch_size = image.shape[0]\n prompt = [\"\"] * batch_size\n upscaled_image = self.pipeline(\n prompt=prompt,\n image=image,\n num_inference_steps=num_inference_steps,\n noise_level=noise_level,\n 
output_type=\"pt\",\n ).images\n return upscaled_image" }, { "identifier": "get_inpainted_image_row", "path": "nerfiller/utils/image_utils.py", "snippet": "def get_inpainted_image_row(\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n inpainted_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n color: Tuple[float, float, float] = Colors.NEON_PINK.value,\n show_original: bool = False,\n):\n \"\"\"Returns an image concatenated along the x-axis. It has the following form:\n image with inpaint regions highlighted | image with inpainted regions\n Inpaint where mask == 1.\n The default color is neon pink.\n If the inpainted image is None, then just show the `image with inpaint regions highlighted`.\n \"\"\"\n device = image.device\n c = torch.tensor(color, device=device).view(1, 3, 1, 1)\n color_image = torch.ones_like(image) * c\n image_with_highlights = torch.where(mask == 1, color_image, image)\n image_list = [image_with_highlights]\n if inpainted_image is not None:\n image_list = image_list + [inpainted_image]\n if show_original:\n image_list = [image] + image_list\n im = torch.cat(image_list, dim=-2)\n return im" }, { "identifier": "downscale_mask", "path": "nerfiller/utils/mask_utils.py", "snippet": "def downscale_mask(mask, size=None, scale_factor=None, dilate_iters=0, dilate_kernel_size=3):\n \"\"\"\n Downscale the mask in a conservative way. 1s are where to inpaint, 0 where to not inpaint.\n Inpaints extra pixels to prevent leakage under the mask.\n \"\"\"\n assert size or scale_factor\n if size:\n assert scale_factor is None\n if scale_factor:\n assert size is None\n for _ in range(dilate_iters):\n mask = dilate(mask, kernel_size=dilate_kernel_size)\n mask = torch.nn.functional.interpolate(mask, size=size, scale_factor=scale_factor, mode=\"bilinear\")\n mask = (mask != 0.0).float() # expands the mask slightly for no leakage of pixels\n return mask" } ]
from dataclasses import dataclass, field from typing import Literal, Optional, Type from torch.cuda.amp.grad_scaler import GradScaler from nerfstudio.pipelines.base_pipeline import VanillaPipelineConfig, VanillaPipeline from nerfiller.inpaint.rgb_inpainter import RGBInpainter, RGBInpainterXL from nerfiller.inpaint.depth_inpainter import DepthInpainter from nerfiller.inpaint.upscaler import Upscaler from nerfstudio.utils import profiler from nerfiller.utils.image_utils import ( get_inpainted_image_row, ) from nerfstudio.utils.rich_utils import Console from nerfstudio.utils.colormaps import apply_colormap, ColormapOptions from jaxtyping import Float from torch import Tensor from nerfiller.utils.mask_utils import downscale_mask from nerfiller.utils.typing import * from nerfstudio.engine.callbacks import TrainingCallback, TrainingCallbackAttributes import torch import mediapy
token_num: 12,702
edit_num: int = 40 """number of images to edit at a time""" edit_iters: int = 30001 """how many iterations until we stop making changes""" num_inference_steps: int = 20 multidiffusion_steps: int = 1 randomize_latents: bool = True randomize_within_grid: bool = False use_annealing: bool = True lower_bound: float = 0.4 """Lower bound for diffusion timesteps to use for image editing""" upper_bound: float = 1.0 """Upper bound for diffusion timesteps to use for image editing""" denoise_in_grid: bool = True dilate_iters: int = 5 dilate_kernel_size: int = 3 allow_camera_mismatch: bool = False tile_resolution: int = 256 upscale: bool = False inpaint_chunk_size: Optional[int] = None render_all_rate: int = 5000 reference_image: Path = Path("reference.png") lora_model_path: Optional[str] = None only_sample_from_latest: bool = True """Only sample rays from the latest inpaints.""" inpaint_method: str = "inpaint" """Strategy for inpainting a batch of images.""" text_guidance_scale: float = 0.0 image_guidance_scale: float = 1.5 inpaint_index_start: int = 0 """We will edit images starting from this index and onward.""" sds_loss_mult: float = 1.0 sds_guidance_mult: float = 10.0 sds_downscale_factor: int = 1 class NeRFillerPipeline(VanillaPipeline): """The pipeline for the NeRFiller method.""" def __init__( self, config: NeRFillerPipelineConfig, device: str, test_mode: Literal["test", "val", "inference"] = "val", world_size: int = 1, local_rank: int = 0, grad_scaler: Optional[GradScaler] = None, ): super().__init__(config, device, test_mode, world_size, local_rank, grad_scaler=grad_scaler) if test_mode != "val": # skip the rest of setup if we aren't going to train return self.grad_scaler = grad_scaler self.start_step = None self.num_train_images = len(self.datamanager.train_dataparser_outputs.image_filenames) self.load_training_modules() def get_training_callbacks( self, training_callback_attributes: TrainingCallbackAttributes ) -> List[TrainingCallback]: self.trainer_base_dir = training_callback_attributes.trainer.base_dir return super().get_training_callbacks(training_callback_attributes) def load_state_dict(self, state_dict: Mapping[str, Any], strict: Optional[bool] = None): is_ddp_model_state = True model_state = {} for key, value in state_dict.items(): if key.startswith("_model."): # remove the "_model." prefix from key model_state[key[len("_model.") :]] = value # make sure that the "module." prefix comes from DDP, # rather than an attribute of the model named "module" if not key.startswith("_model.module."): is_ddp_model_state = False # remove "module." prefix added by DDP if is_ddp_model_state: model_state = {key[len("module.") :]: value for key, value in model_state.items()} pipeline_state = {key: value for key, value in state_dict.items() if not key.startswith("_model.")} if self.config.allow_camera_mismatch: # Don't set the weights for the appearance embedding # This sets the weights to be zero. 
key = "field.embedding_appearance.embedding.weight" model_state[key] = torch.zeros(self.model.field.embedding_appearance.embedding.weight.shape) try: self.model.load_state_dict(model_state, strict=True) except RuntimeError: if not strict: self.model.load_state_dict(model_state, strict=False) else: raise super().load_state_dict(pipeline_state, strict=False) def load_training_modules(self): """Load the modules.""" # RGB and depth inpainting rgb_inpaint_device = ( self.config.rgb_inpaint_device if self.config.rgb_inpaint_device is not None else self.device ) rgb_inpaint_vae_device = ( self.config.rgb_inpaint_vae_device if self.config.rgb_inpaint_vae_device is not None else rgb_inpaint_device ) if self.config.rgb_inpainter == "sd": self.rgb_inpainter = RGBInpainter( device=rgb_inpaint_device, vae_device=rgb_inpaint_vae_device, lora_model_path=self.config.lora_model_path, ) elif self.config.rgb_inpainter == "sdxl":
CONSOLE = Console() @dataclass class NeRFillerPipelineConfig(VanillaPipelineConfig): """The config for the NeRFiller pipeline.""" _target: Type = field(default_factory=lambda: NeRFillerPipeline) patch_size: int = 32 # inpaint args rgb_inpainter: str = "sd" rgb_inpaint_device: Optional[str] = "cuda:1" """device to put the rgb inpainting module on""" rgb_inpaint_vae_device: Optional[str] = None """device to put the vae inpainting module on. defaults to rgb inpaint device""" depth_inpaint_device: Optional[str] = "cuda:0" """device to put the depth inpainting module on""" upscale_device: Optional[str] = "cuda:0" """device to put the upscaler module on""" prompt: str = "highly detailed, 4K, hdr, sharp focus, image" """positive prompt for text-conditioned inpainting""" negative_prompt: str = "" """negative prompt for text-conditionied inpainting""" depth_method: Literal["zoedepth", "irondepth"] = "zoedepth" """which depth network to use for depth prediction or depth completion""" # sds use_sds: bool = False # du (dataset update) args use_du: bool = True """how often to update the dataset via inpainting. if 0, don't do dataset updating""" edit_rate: int = 1000 """how often to make an edit""" edit_num: int = 40 """number of images to edit at a time""" edit_iters: int = 30001 """how many iterations until we stop making changes""" num_inference_steps: int = 20 multidiffusion_steps: int = 1 randomize_latents: bool = True randomize_within_grid: bool = False use_annealing: bool = True lower_bound: float = 0.4 """Lower bound for diffusion timesteps to use for image editing""" upper_bound: float = 1.0 """Upper bound for diffusion timesteps to use for image editing""" denoise_in_grid: bool = True dilate_iters: int = 5 dilate_kernel_size: int = 3 allow_camera_mismatch: bool = False tile_resolution: int = 256 upscale: bool = False inpaint_chunk_size: Optional[int] = None render_all_rate: int = 5000 reference_image: Path = Path("reference.png") lora_model_path: Optional[str] = None only_sample_from_latest: bool = True """Only sample rays from the latest inpaints.""" inpaint_method: str = "inpaint" """Strategy for inpainting a batch of images.""" text_guidance_scale: float = 0.0 image_guidance_scale: float = 1.5 inpaint_index_start: int = 0 """We will edit images starting from this index and onward.""" sds_loss_mult: float = 1.0 sds_guidance_mult: float = 10.0 sds_downscale_factor: int = 1 class NeRFillerPipeline(VanillaPipeline): """The pipeline for the NeRFiller method.""" def __init__( self, config: NeRFillerPipelineConfig, device: str, test_mode: Literal["test", "val", "inference"] = "val", world_size: int = 1, local_rank: int = 0, grad_scaler: Optional[GradScaler] = None, ): super().__init__(config, device, test_mode, world_size, local_rank, grad_scaler=grad_scaler) if test_mode != "val": # skip the rest of setup if we aren't going to train return self.grad_scaler = grad_scaler self.start_step = None self.num_train_images = len(self.datamanager.train_dataparser_outputs.image_filenames) self.load_training_modules() def get_training_callbacks( self, training_callback_attributes: TrainingCallbackAttributes ) -> List[TrainingCallback]: self.trainer_base_dir = training_callback_attributes.trainer.base_dir return super().get_training_callbacks(training_callback_attributes) def load_state_dict(self, state_dict: Mapping[str, Any], strict: Optional[bool] = None): is_ddp_model_state = True model_state = {} for key, value in state_dict.items(): if key.startswith("_model."): # remove the "_model." 
prefix from key model_state[key[len("_model.") :]] = value # make sure that the "module." prefix comes from DDP, # rather than an attribute of the model named "module" if not key.startswith("_model.module."): is_ddp_model_state = False # remove "module." prefix added by DDP if is_ddp_model_state: model_state = {key[len("module.") :]: value for key, value in model_state.items()} pipeline_state = {key: value for key, value in state_dict.items() if not key.startswith("_model.")} if self.config.allow_camera_mismatch: # Don't set the weights for the appearance embedding # This sets the weights to be zero. key = "field.embedding_appearance.embedding.weight" model_state[key] = torch.zeros(self.model.field.embedding_appearance.embedding.weight.shape) try: self.model.load_state_dict(model_state, strict=True) except RuntimeError: if not strict: self.model.load_state_dict(model_state, strict=False) else: raise super().load_state_dict(pipeline_state, strict=False) def load_training_modules(self): """Load the modules.""" # RGB and depth inpainting rgb_inpaint_device = ( self.config.rgb_inpaint_device if self.config.rgb_inpaint_device is not None else self.device ) rgb_inpaint_vae_device = ( self.config.rgb_inpaint_vae_device if self.config.rgb_inpaint_vae_device is not None else rgb_inpaint_device ) if self.config.rgb_inpainter == "sd": self.rgb_inpainter = RGBInpainter( device=rgb_inpaint_device, vae_device=rgb_inpaint_vae_device, lora_model_path=self.config.lora_model_path, ) elif self.config.rgb_inpainter == "sdxl":
next_line: self.rgb_inpainter = RGBInpainterXL(
gold_snippet_index: 1
created_at: 2023-12-07 19:12:08+00:00
level: 16k
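Reading aid (not part of the dataset row above): the nerfiller record's RGBInpainter snippet chunks the UNet output into text-conditioned, image-conditioned, and unconditional predictions and blends them with two guidance scales. A self-contained sketch of that blending formula is given below, using the guidance defaults shown in the pipeline config (text_guidance_scale 0.0, image_guidance_scale 1.5); the function name and toy tensors are illustrative assumptions, not the repository's API.

import torch

def combine_guidance(noise_pred_text: torch.Tensor,
                     noise_pred_image: torch.Tensor,
                     noise_pred_uncond: torch.Tensor,
                     text_guidance_scale: float,
                     image_guidance_scale: float) -> torch.Tensor:
    # Start from the image-conditioned prediction, push toward the text
    # condition, and push away from the fully unconditional prediction,
    # mirroring the combination in the snippet above.
    return (
        noise_pred_image
        + text_guidance_scale * (noise_pred_text - noise_pred_image)
        + image_guidance_scale * (noise_pred_image - noise_pred_uncond)
    )

# Toy usage: three predictions for one latent, combined with the config defaults.
eps_text, eps_img, eps_uncond = torch.randn(3, 1, 4, 8, 8).unbind(0)
eps = combine_guidance(eps_text, eps_img, eps_uncond, 0.0, 1.5)
print(eps.shape)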
repo_name: nnanhuang/Customize-it-3D
file_path: ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key 
in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, 
ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = 
self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n 
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n t_start=-1):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback: \n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, 
use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec" }, { "identifier": "CrossAttention", "path": "ldm/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head ** -0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim),\n nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))\n\n sim = einsum('b i d, b j d -> b i j', q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, 'b ... -> b (...)')\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, 'b j -> (b h) () j', h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum('b i j, b j d -> b i d', attn, v)\n out = rearrange(out, '(b h) n d -> b n (h d)', h=h)\n return self.to_out(out)" } ]
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.rank_zero import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from ldm.modules.attention import CrossAttention
11602
assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape)==len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape)==len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """
mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
15
2023-12-14 11:03:35+00:00
16k
mkang315/ASF-YOLO
segment/predict.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # ONNX Runtime: *.onnx\n # ONNX OpenCV DNN: *.onnx --dnn\n # OpenVINO: *_openvino_model\n # CoreML: *.mlmodel\n # TensorRT: *.engine\n # TensorFlow SavedModel: *_saved_model\n # TensorFlow GraphDef: *.pb\n # TensorFlow Lite: *.tflite\n # TensorFlow Edge TPU: *_edgetpu.tflite\n # PaddlePaddle: *_paddle_model\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)\n fp16 &= pt or jit or onnx or engine # FP16\n nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)\n stride = 32 # default stride\n cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA\n if not (pt or triton):\n w = attempt_download(w) # download if not local\n\n if pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)\n stride = max(int(model.stride.max()), 32) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n model.half() if fp16 else model.float()\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files, map_location=device)\n model.half() if fp16 else model.float()\n if extra_files['config.txt']: # load metadata dict\n d = json.loads(extra_files['config.txt'],\n object_hook=lambda d: {int(k) if k.isdigit() else k: v\n for k, v in d.items()})\n stride, names = int(d['stride']), d['names']\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements('opencv-python>=4.5.4')\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n output_names = [x.name for x in session.get_outputs()]\n meta = session.get_modelmeta().custom_metadata_map # metadata\n if 'stride' in meta:\n stride, names = int(meta['stride']), eval(meta['names'])\n elif xml: # OpenVINO\n LOGGER.info(f'Loading {w} for OpenVINO inference...')\n check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/\n from openvino.runtime import Core, Layout, get_batch\n ie = Core()\n if not Path(w).is_file(): # if not *.xml\n w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir\n network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))\n if network.get_parameters()[0].get_layout().empty:\n network.get_parameters()[0].set_layout(Layout(\"NCHW\"))\n batch_dim = get_batch(network)\n if batch_dim.is_static:\n batch_size = batch_dim.get_length()\n 
executable_network = ie.compile_model(network, device_name=\"CPU\") # device_name=\"MYRIAD\" for Intel NCS2\n stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0\n if device.type == 'cpu':\n device = torch.device('cuda:0')\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n context = model.create_execution_context()\n bindings = OrderedDict()\n output_names = []\n fp16 = False # default updated below\n dynamic = False\n for i in range(model.num_bindings):\n name = model.get_binding_name(i)\n dtype = trt.nptype(model.get_binding_dtype(i))\n if model.binding_is_input(i):\n if -1 in tuple(model.get_binding_shape(i)): # dynamic\n dynamic = True\n context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))\n if dtype == np.float16:\n fp16 = True\n else: # output\n output_names.append(name)\n shape = tuple(context.get_binding_shape(i))\n im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)\n bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif saved_model: # TF SavedModel\n LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')\n import tensorflow as tf\n keras = False # assume TF1 saved_model\n model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)\n elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), []) # wrapped\n ge = x.graph.as_graph_element\n return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))\n\n def gd_outputs(gd):\n name_list, input_list = [], []\n for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef\n name_list.append(node.name)\n input_list.extend(node.input)\n return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))\n\n gd = tf.Graph().as_graph_def() # TF GraphDef\n with open(w, 'rb') as f:\n gd.ParseFromString(f.read())\n frozen_func = wrap_frozen_graph(gd, inputs=\"x:0\", outputs=gd_outputs(gd))\n elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu\n from tflite_runtime.interpreter import Interpreter, load_delegate\n except ImportError:\n import tensorflow as tf\n Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,\n if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n delegate = {\n 'Linux': 'libedgetpu.so.1',\n 'Darwin': 'libedgetpu.1.dylib',\n 'Windows': 
'edgetpu.dll'}[platform.system()]\n interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])\n else: # TFLite\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n interpreter = Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n # load metadata\n with contextlib.suppress(zipfile.BadZipFile):\n with zipfile.ZipFile(w, \"r\") as model:\n meta_file = model.namelist()[0]\n meta = ast.literal_eval(model.read(meta_file).decode(\"utf-8\"))\n stride, names = int(meta['stride']), meta['names']\n elif tfjs: # TF.js\n raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')\n elif paddle: # PaddlePaddle\n LOGGER.info(f'Loading {w} for PaddlePaddle inference...')\n check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')\n import paddle.inference as pdi\n if not Path(w).is_file(): # if not *.pdmodel\n w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir\n weights = Path(w).with_suffix('.pdiparams')\n config = pdi.Config(str(w), str(weights))\n if cuda:\n config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)\n predictor = pdi.create_predictor(config)\n input_handle = predictor.get_input_handle(predictor.get_input_names()[0])\n output_names = predictor.get_output_names()\n elif triton: # NVIDIA Triton Inference Server\n LOGGER.info(f'Using {w} as Triton Inference Server...')\n check_requirements('tritonclient[all]')\n from utils.triton import TritonRemoteModel\n model = TritonRemoteModel(url=w)\n nhwc = model.runtime.startswith(\"tensorflow\")\n else:\n raise NotImplementedError(f'ERROR: {w} is not a supported format')\n\n # class names\n if 'names' not in locals():\n names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}\n if names[0] == 'n01440764' and len(names) == 1000: # ImageNet\n names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names\n\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.fp16 and im.dtype != torch.float16:\n im = im.half() # to FP16\n if self.nhwc:\n im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)\n\n if self.pt: # PyTorch\n y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)\n elif self.jit: # TorchScript\n y = self.model(im)\n elif self.dnn: # ONNX OpenCV DNN\n im = im.cpu().numpy() # torch to numpy\n self.net.setInput(im)\n y = self.net.forward()\n elif self.onnx: # ONNX Runtime\n im = im.cpu().numpy() # torch to numpy\n y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})\n elif self.xml: # OpenVINO\n im = im.cpu().numpy() # FP32\n y = list(self.executable_network([im]).values())\n elif self.engine: # TensorRT\n if self.dynamic and im.shape != self.bindings['images'].shape:\n i = self.model.get_binding_index('images')\n self.context.set_binding_shape(i, im.shape) # reshape if dynamic\n self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)\n for name in self.output_names:\n i = self.model.get_binding_index(name)\n self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))\n s = self.bindings['images'].shape\n assert im.shape == s, f\"input size 
{im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}\"\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = [self.bindings[x].data for x in sorted(self.output_names)]\n elif self.coreml: # CoreML\n im = im.cpu().numpy()\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.ANTIALIAS)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n if 'confidence' in y:\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n else:\n y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)\n elif self.paddle: # PaddlePaddle\n im = im.cpu().numpy().astype(np.float32)\n self.input_handle.copy_from_cpu(im)\n self.predictor.run()\n y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]\n elif self.triton: # NVIDIA Triton Inference Server\n y = self.model(im)\n else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n im = im.cpu().numpy()\n if self.saved_model: # SavedModel\n y = self.model(im, training=False) if self.keras else self.model(im)\n elif self.pb: # GraphDef\n y = self.frozen_func(x=self.tf.constant(im))\n else: # Lite or Edge TPU\n input = self.input_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = []\n for output in self.output_details:\n x = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n x = (x.astype(np.float32) - zero_point) * scale # re-scale\n y.append(x)\n y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]\n y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels\n\n if isinstance(y, (list, tuple)):\n return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]\n else:\n return self.from_numpy(y)\n\n def from_numpy(self, x):\n return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x\n\n def warmup(self, imgsz=(1, 3, 640, 640)):\n # Warmup model by running inference once\n warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton\n if any(warmup_types) and (self.device.type != 'cpu' or self.triton):\n im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input\n for _ in range(2 if self.jit else 1): #\n self.forward(im) # warmup\n\n @staticmethod\n def _model_type(p='path/to/model.pt'):\n # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx\n # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]\n from export import export_formats\n from utils.downloads import is_url\n sf = list(export_formats().Suffix) # export suffixes\n if not is_url(p, check=False):\n check_suffix(p, sf) # checks\n url = urlparse(p) # if url may be Triton inference server\n types = [s in Path(p).name for s in sf]\n types[8] &= not types[9] # tflite &= not edgetpu\n triton = not any(types) and all([any(s in url.scheme for s in [\"http\", \"grpc\"]), url.netloc])\n return types + [triton]\n\n @staticmethod\n def _load_metadata(f=Path('path/to/meta.yaml')):\n # Load metadata from meta.yaml if it exists\n if f.exists():\n d = yaml_load(f)\n return d['stride'], d['names'] # assign stride, names\n return None, None" }, { "identifier": "IMG_FORMATS", "path": "utils/dataloaders.py", "snippet": "IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes" }, { "identifier": "VID_FORMATS", "path": "utils/dataloaders.py", "snippet": "VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes" }, { "identifier": "LoadImages", "path": "utils/dataloaders.py", "snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):\n files = []\n for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:\n p = str(Path(p).resolve())\n if '*' in p:\n files.extend(sorted(glob.glob(p, recursive=True))) # glob\n elif os.path.isdir(p):\n files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir\n elif os.path.isfile(p):\n files.append(p) # files\n else:\n raise FileNotFoundError(f'{p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n self.transforms = transforms # optional\n self.vid_stride = vid_stride # video frame-rate stride\n if any(videos):\n self._new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n for _ in range(self.vid_stride):\n self.cap.grab()\n ret_val, im0 = self.cap.retrieve()\n while not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n path = self.files[self.count]\n self._new_video(path)\n ret_val, im0 = self.cap.read()\n\n self.frame += 1\n # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False\n s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '\n\n else:\n # Read image\n self.count += 1\n im0 = cv2.imread(path) # BGR\n assert im0 is not None, f'Image Not Found {path}'\n s = f'image {self.count}/{self.nf} {path}: '\n\n if self.transforms:\n im = self.transforms(im0) # transforms\n else:\n im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize\n im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n im = np.ascontiguousarray(im) # contiguous\n\n return path, im, im0, self.cap, s\n\n def _new_video(self, path):\n # Create a new video capture object\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)\n self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees\n # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493\n\n def _cv2_rotate(self, im):\n # Rotate a cv2 video manually\n if self.orientation == 0:\n return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)\n elif self.orientation == 180:\n return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)\n elif self.orientation == 90:\n return cv2.rotate(im, cv2.ROTATE_180)\n return im\n\n def __len__(self):\n return self.nf # number of files" }, { "identifier": "LoadScreenshots", "path": "utils/dataloaders.py", "snippet": "class LoadScreenshots:\n # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source \"screen 0 100 100 512 256\"`\n def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):\n # source = [screen_number left top width height] (pixels)\n check_requirements('mss')\n import mss\n\n source, *params = source.split()\n self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0\n if len(params) == 1:\n self.screen = int(params[0])\n elif len(params) == 4:\n left, top, width, height = (int(x) for x in params)\n elif len(params) == 5:\n self.screen, left, top, width, height = (int(x) for x in params)\n self.img_size = img_size\n self.stride = stride\n self.transforms = transforms\n self.auto = auto\n self.mode = 'stream'\n self.frame = 0\n self.sct = mss.mss()\n\n # Parse monitor shape\n monitor = self.sct.monitors[self.screen]\n self.top = monitor[\"top\"] if top is None else (monitor[\"top\"] + top)\n self.left = monitor[\"left\"] if left is None else (monitor[\"left\"] + left)\n self.width = width or monitor[\"width\"]\n self.height = height or monitor[\"height\"]\n self.monitor = {\"left\": self.left, \"top\": self.top, \"width\": self.width, \"height\": self.height}\n\n def __iter__(self):\n return self\n\n def __next__(self):\n # mss screen capture: get raw pixels from the screen as np array\n im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR\n s = f\"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: \"\n\n if self.transforms:\n im = self.transforms(im0) # transforms\n else:\n im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize\n im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n im = np.ascontiguousarray(im) # contiguous\n self.frame += 1\n return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s" }, { "identifier": "LoadStreams", "path": "utils/dataloaders.py", "snippet": "class LoadStreams:\n # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`\n def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):\n torch.backends.cudnn.benchmark = True # faster for fixed-size inference\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n self.vid_stride = vid_stride # video frame-rate stride\n sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]\n n = len(sources)\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n st = f'{i + 1}/{n}: {s}... '\n if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video\n # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc'\n check_requirements(('pafy', 'youtube_dl==2020.12.2'))\n import pafy\n s = pafy.new(s).getbest(preftype=\"mp4\").url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam\n if s == 0:\n assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'\n assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. 
Rerun command in a local environment.'\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'{st}Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n LOGGER.info(f\"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n self.threads[i].start()\n LOGGER.info('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n self.auto = auto and self.rect\n self.transforms = transforms # optional\n if not self.rect:\n LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap, stream):\n # Read stream `i` frames in daemon thread\n n, f = 0, self.frames[i] # frame number, frame array\n while cap.isOpened() and n < f:\n n += 1\n cap.grab() # .read() = .grab() followed by .retrieve()\n if n % self.vid_stride == 0:\n success, im = cap.retrieve()\n if success:\n self.imgs[i] = im\n else:\n LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')\n self.imgs[i] = np.zeros_like(self.imgs[i])\n cap.open(stream) # re-open stream if signal was lost\n time.sleep(0.0) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n im0 = self.imgs.copy()\n if self.transforms:\n im = np.stack([self.transforms(x) for x in im0]) # transforms\n else:\n im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize\n im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n im = np.ascontiguousarray(im) # contiguous\n\n return self.sources, im, im0, None, ''\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "FILE = Path(__file__).resolve()\nROOT = FILE.parents[1] # YOLOv5 root directory\nRANK = int(os.getenv('RANK', -1))\nNUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads\nDATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory\nAUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode\nVERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode\nTQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format\nFONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf\nLOGGING_NAME = \"yolov5\"\nLOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)\nCONFIG_DIR = user_config_dir() # Ultralytics settings dir\ndef is_ascii(s=''):\ndef is_chinese(s='人工智能'):\ndef is_colab():\ndef is_notebook():\ndef is_kaggle():\ndef is_docker() -> bool:\ndef is_writeable(dir, 
test=False):\ndef set_logging(name=LOGGING_NAME, verbose=True):\ndef user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):\n def __init__(self, t=0.0):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\n def time(self):\n def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):\n def _timeout_handler(self, signum, frame):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def __init__(self, new_dir):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\ndef methods(instance):\ndef print_args(args: Optional[dict] = None, show_file=True, show_func=False):\ndef init_seeds(seed=0, deterministic=False):\ndef intersect_dicts(da, db, exclude=()):\ndef get_default_args(func):\ndef get_latest_run(search_dir='.'):\ndef file_age(path=__file__):\ndef file_date(path=__file__):\ndef file_size(path):\ndef check_online():\n def run_once():\ndef git_describe(path=ROOT): # path must be a directory\ndef check_git_status(repo='ultralytics/yolov5', branch='master'):\ndef check_git_info(path='.'):\ndef check_python(minimum='3.7.0'):\ndef check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''):\ndef check_img_size(imgsz, s=32, floor=0):\ndef check_imshow(warn=False):\ndef check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):\ndef check_yaml(file, suffix=('.yaml', '.yml')):\ndef check_file(file, suffix=''):\ndef check_font(font=FONT, progress=False):\ndef check_dataset(data, autodownload=True):\ndef check_amp(model):\n def amp_allclose(model, im):\ndef yaml_load(file='data.yaml'):\ndef yaml_save(file='data.yaml', data={}):\ndef unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):\ndef url2file(url):\ndef download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):\n def download_one(url, dir):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):\ndef scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):\ndef clip_boxes(boxes, shape):\ndef clip_segments(segments, shape):\ndef my_soft_nms(bboxes, scores, iou_thresh=0.5, sigma=0.5, score_threshold=0.25):\ndef non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nm=0, # number of masks\n):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):\ndef apply_classifier(x, model, img, im0):\ndef increment_path(path, exist_ok=False, sep='', mkdir=False):\ndef imread(path, flags=cv2.IMREAD_COLOR):\ndef imwrite(path, im):\ndef imshow(path, im):\nclass Profile(contextlib.ContextDecorator):\nclass 
Timeout(contextlib.ContextDecorator):\nclass WorkingDirectory(contextlib.ContextDecorator):" }, { "identifier": "Annotator", "path": "utils/plots.py", "snippet": "RANK = int(os.getenv('RANK', -1))\nclass Colors:\nclass Annotator:\n def __init__(self):\n def __call__(self, i, bgr=False):\n def hex2rgb(h): # rgb order (PIL)\ndef check_pil_font(font=FONT, size=10):\n def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):\n def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False):\n def rectangle(self, xy, fill=None, outline=None, width=1):\n def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'):\n def fromarray(self, im):\n def result(self):\ndef feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\ndef hist2d(x, y, n=100):\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n def butter_lowpass(cutoff, fs, order):\ndef output_to_target(output, max_det=300):\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None):\ndef plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):\ndef plot_val_txt(): # from utils.plots import *; plot_val()\ndef plot_targets_txt(): # from utils.plots import *; plot_targets_txt()\ndef plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\ndef plot_labels(labels, names=(), save_dir=Path('')):\ndef imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')):\ndef plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()\ndef plot_results(file='path/to/results.csv', dir=''):\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=''):\ndef save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True):" }, { "identifier": "masks2segments", "path": "utils/segment/general.py", "snippet": "def masks2segments(masks, strategy='largest'):\n # Convert masks(n,160,160) into segments(n,xy)\n segments = []\n for x in masks.int().cpu().numpy().astype('uint8'):\n c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]\n if c:\n if strategy == 'concat': # concatenate all segments\n c = np.concatenate([x.reshape(-1, 2) for x in c])\n elif strategy == 'largest': # select largest segment\n c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)\n else:\n c = np.zeros((0, 2)) # no segments found\n segments.append(c.astype('float32'))\n return segments" }, { "identifier": "process_mask", "path": "utils/segment/general.py", "snippet": "def process_mask(protos, masks_in, bboxes, shape, upsample=False):\n \"\"\"\n Crop before upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n ih, iw = shape\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW\n\n downsampled_bboxes = bboxes.clone()\n downsampled_bboxes[:, 0] *= mw / iw\n downsampled_bboxes[:, 2] *= mw / iw\n downsampled_bboxes[:, 3] *= mh / ih\n downsampled_bboxes[:, 1] *= mh / ih\n\n masks = crop_mask(masks, downsampled_bboxes) # CHW\n if upsample:\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n return masks.gt_(0.5)" }, { "identifier": "process_mask_native", 
"path": "utils/segment/general.py", "snippet": "def process_mask_native(protos, masks_in, bboxes, dst_shape):\n \"\"\"\n Crop after upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n c, mh, mw = protos.shape # CHW\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)\n gain = min(mh / dst_shape[0], mw / dst_shape[1]) # gain = old / new\n pad = (mw - dst_shape[1] * gain) / 2, (mh - dst_shape[0] * gain) / 2 # wh padding\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(mh - pad[1]), int(mw - pad[0])\n masks = masks[:, top:bottom, left:right]\n\n masks = F.interpolate(masks[None], dst_shape, mode='bilinear', align_corners=False)[0] # CHW\n masks = crop_mask(masks, bboxes) # CHW\n return masks.gt_(0.5)" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "smart_inference_mode", "path": "utils/torch_utils.py", "snippet": "def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):\n # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator\n def decorate(fn):\n return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)\n\n return decorate" } ]
import argparse
import os
import platform
import sys
import time

import torch
from pathlib import Path

from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                           increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, strip_optimizer,
                           xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.segment.general import masks2segments, process_mask, process_mask_native
from utils.torch_utils import select_device, smart_inference_mode
14,333
seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim # Inference with dt[1]: act = time.time() visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred, proto = model(im, augment=augment, visualize=visualize)[:2] print('time.time():',time.time()-act) # NMS with dt[2]: pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): if retina_masks: # scale bbox first the crop masks det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC else: masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size # Segments if save_txt: segments = reversed(masks2segments(masks)) segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) for x in segments] # Print results for c in det[:, 5].unique(): n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. 
\ if retina_masks else im[i] annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file segj = segments[j].reshape(-1) # (n,2) to (n*2) line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Stream results im0 = annotator.result() if view_img: if platform.system() == 'Linux' and p not in windows: windows.append(p) cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) cv2.imshow(str(p), im0) if cv2.waitKey(1) == ord('q'): # 1 millisecond exit() # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print time (inference-only) LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") # Print results t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update:
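For readers skimming the inference loop above, here is a minimal, hypothetical helper (the prepare_image name is ours) that restates the tensor preparation performed under dt[0] before the forward pass:

import numpy as np
import torch

def prepare_image(im: np.ndarray, device: torch.device, fp16: bool = False) -> torch.Tensor:
    """Restates the pre-processing in the loop above: uint8 CHW array ->
    normalized float tensor with a leading batch dimension."""
    t = torch.from_numpy(im).to(device)
    t = t.half() if fp16 else t.float()  # uint8 to fp16/fp32
    t /= 255                             # 0 - 255 to 0.0 - 1.0
    if t.ndim == 3:
        t = t[None]                      # expand for batch dim: CHW -> 1xCHW
    return t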
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. Usage - sources: $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam img.jpg # image vid.mp4 # video screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_model # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @smart_inference_mode() def run( weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/predict-seg', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference vid_stride=1, # video frame-rate stride retina_masks=False, ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, names, pt = model.stride, model.names, model.pt imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader bs = 1 # batch_size if webcam: view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, 
stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim # Inference with dt[1]: act = time.time() visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred, proto = model(im, augment=augment, visualize=visualize)[:2] print('time.time():',time.time()-act) # NMS with dt[2]: pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): if retina_masks: # scale bbox first the crop masks det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC else: masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size # Segments if save_txt: segments = reversed(masks2segments(masks)) segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) for x in segments] # Print results for c in det[:, 5].unique(): n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. 
\ if retina_masks else im[i] annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file segj = segments[j].reshape(-1) # (n,2) to (n*2) line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Stream results im0 = annotator.result() if view_img: if platform.system() == 'Linux' and p not in windows: windows.append(p) cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) cv2.imshow(str(p), im0) if cv2.waitKey(1) == ord('q'): # 1 millisecond exit() # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print time (inference-only) LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") # Print results t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update:
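The save_txt branch in the script above writes one '%g'-formatted line per instance: the class index, the polygon points normalized to the original image size, and optionally a trailing confidence when --save-conf is set. A hedged sketch of a reader for that label format (read_segment_label is our name, not part of the repository):

import numpy as np

def read_segment_label(path: str, img_w: int, img_h: int):
    """Parse lines of the form 'cls x1 y1 x2 y2 ...' with points normalized
    to [0, 1], plus an optional trailing confidence value."""
    results = []
    with open(path) as f:
        for line in f:
            values = [float(v) for v in line.split()]
            cls = int(values[0])
            pts = np.array(values[1:], dtype=np.float32)
            if pts.size % 2 == 1:          # odd count -> last value is the confidence
                conf, pts = float(pts[-1]), pts[:-1]
            else:
                conf = None
            poly = pts.reshape(-1, 2) * np.array([img_w, img_h], dtype=np.float32)
            results.append((cls, poly, conf))
    return results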
strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)
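The completion target above calls strip_optimizer to shrink the checkpoint after --update. The repository's own implementation is not shown in this record (only its signature appears in the utils.general listing); as a rough, simplified illustration of the general idea only, dropping training-only state before re-saving might look like:

import torch

def drop_optimizer_state(ckpt_path: str) -> None:
    # Simplified sketch, not utils.general.strip_optimizer: remove the optimizer
    # entry (training-only state) so the re-saved checkpoint is smaller for inference.
    ckpt = torch.load(ckpt_path, map_location='cpu')
    ckpt.pop('optimizer', None)
    torch.save(ckpt, ckpt_path)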
6
2023-12-10 14:18:29+00:00
16k
youngskkim/CRN
models/camera_radar_net_det.py
[ { "identifier": "BaseBEVDepth", "path": "models/base_bev_depth.py", "snippet": "class BaseBEVDepth(nn.Module):\n \"\"\"Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`.\n\n Args:\n backbone_conf (dict): Config of backbone.\n head_conf (dict): Config of head.\n \"\"\"\n\n def __init__(self, backbone_conf, head_conf):\n super(BaseBEVDepth, self).__init__()\n self.backbone_img = BaseLSSFPN(**backbone_conf)\n self.head = BEVDepthHead(**head_conf)\n\n # for inference time measurement\n self.idx = 0\n self.times_dict = {\n 'img': [],\n 'img_backbone': [],\n 'img_dep': [],\n 'img_transform': [],\n 'img_pool': [],\n\n 'head': [],\n 'head_backbone': [],\n 'head_head': [],\n }\n\n def forward(self,\n sweep_imgs,\n mats_dict,\n is_train=False\n ):\n \"\"\"Forward function for BEVDepth\n\n Args:\n sweep_imgs (Tensor): Input images.\n mats_dict(dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego with shape of (B, num_sweeps,\n num_cameras, 4, 4).\n intrin_mats(Tensor): Intrinsic matrix with shape\n of (B, num_sweeps, num_cameras, 4, 4).\n ida_mats(Tensor): Transformation matrix for ida with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n bda_mat(Tensor): Rotation matrix for bda with shape\n of (B, 4, 4).\n\n Returns:\n tuple(list[dict]): Output results for tasks.\n \"\"\"\n if is_train:\n self.time = None\n\n x, depth, _ = self.backbone_img(sweep_imgs, mats_dict,\n is_return_depth=True)\n preds, _ = self.head(x)\n return preds, depth\n else:\n if self.idx < 100: # skip few iterations for warmup\n self.times = None\n elif self.idx == 100:\n self.times = self.times_dict\n\n x, self.times = self.backbone_img(sweep_imgs, mats_dict,\n times=self.times)\n preds, self.times = self.head(x, times=self.times)\n\n if self.idx == 1000:\n time_mean = {}\n for k, v in self.times.items():\n time_mean[k] = sum(v) / len(v)\n print('img: %.2f' % time_mean['img'])\n print(' img_backbone: %.2f' % time_mean['img_backbone'])\n print(' img_dep: %.2f' % time_mean['img_dep'])\n print(' img_transform: %.2f' % time_mean['img_transform'])\n print(' img_pool: %.2f' % time_mean['img_pool'])\n print('head: %.2f' % time_mean['head'])\n print(' head_backbone: %.2f' % time_mean['head_backbone'])\n print(' head_head: %.2f' % time_mean['head_head'])\n total = time_mean['img'] + time_mean['head']\n print('total: %.2f' % total)\n print(' ')\n print('FPS: %.2f' % (1000/total))\n\n self.idx += 1\n return preds\n\n def get_targets(self, gt_boxes, gt_labels):\n \"\"\"Generate training targets for a single sample.\n\n Args:\n gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n gt_labels_3d (torch.Tensor): Labels of boxes.\n\n Returns:\n tuple[list[torch.Tensor]]: Tuple of target including \\\n the following results in order.\n\n - list[torch.Tensor]: Heatmap scores.\n - list[torch.Tensor]: Ground truth boxes.\n - list[torch.Tensor]: Indexes indicating the position \\\n of the valid boxes.\n - list[torch.Tensor]: Masks indicating which boxes \\\n are valid.\n \"\"\"\n return self.head.get_targets(gt_boxes, gt_labels)\n\n def loss(self, targets, preds_dicts):\n \"\"\"Loss function for BEVDepth.\n\n Args:\n gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground\n truth gt boxes.\n gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n preds_dicts (dict): Output of forward function.\n\n Returns:\n dict[str:torch.Tensor]: Loss of heatmap and 
bbox of each task.\n \"\"\"\n return self.head.loss(targets, preds_dicts)\n\n def get_bboxes(self, preds_dicts, img_metas=None, img=None, rescale=False):\n \"\"\"Generate bboxes from bbox head predictions.\n\n Args:\n preds_dicts (tuple[list[dict]]): Prediction results.\n img_metas (list[dict]): Point cloud and image's meta info.\n\n Returns:\n list[dict]: Decoded bbox, scores and labels after nms.\n \"\"\"\n return self.head.get_bboxes(preds_dicts, img_metas, img, rescale)" }, { "identifier": "RVTLSSFPN", "path": "layers/backbones/rvt_lss_fpn.py", "snippet": "class RVTLSSFPN(BaseLSSFPN):\n def __init__(self, **kwargs):\n super(RVTLSSFPN, self).__init__(**kwargs)\n\n self.register_buffer('frustum', self.create_frustum())\n self.z_bound = kwargs['z_bound']\n self.radar_view_transform = kwargs['radar_view_transform']\n self.camera_aware = kwargs['camera_aware']\n\n self.depth_net = self._configure_depth_net(kwargs['depth_net_conf'])\n self.view_aggregation_net = ViewAggregation(self.output_channels*2,\n self.output_channels*2,\n self.output_channels)\n\n def _configure_depth_net(self, depth_net_conf):\n return DepthNet(\n depth_net_conf['in_channels'],\n depth_net_conf['mid_channels'],\n self.output_channels,\n self.depth_channels,\n camera_aware=self.camera_aware\n )\n\n def get_geometry_collapsed(self, sensor2ego_mat, intrin_mat, ida_mat, bda_mat,\n z_min=-5., z_max=3.):\n batch_size, num_cams, _, _ = sensor2ego_mat.shape\n\n # undo post-transformation\n # B x N x D x H x W x 3\n points = self.frustum\n ida_mat = ida_mat.view(batch_size, num_cams, 1, 1, 1, 4, 4)\n points = ida_mat.inverse().matmul(points.unsqueeze(-1)).double()\n # cam_to_ego\n points = torch.cat(\n (points[:, :, :, :, :, :2] * points[:, :, :, :, :, 2:3],\n points[:, :, :, :, :, 2:]), 5)\n\n combine = sensor2ego_mat.matmul(torch.inverse(intrin_mat)).double()\n points = combine.view(batch_size, num_cams, 1, 1, 1, 4,\n 4).matmul(points).half()\n if bda_mat is not None:\n bda_mat = bda_mat.unsqueeze(1).repeat(1, num_cams, 1, 1).view(\n batch_size, num_cams, 1, 1, 1, 4, 4)\n points = (bda_mat @ points).squeeze(-1)\n else:\n points = points.squeeze(-1)\n\n points_out = points[:, :, :, 0:1, :, :3]\n points_valid_z = ((points[..., 2] > z_min) & (points[..., 2] < z_max))\n\n return points_out, points_valid_z\n\n def _forward_view_aggregation_net(self, img_feat_with_depth):\n # BEVConv2D [n, c, d, h, w] -> [n, h, c, w, d]\n img_feat_with_depth = img_feat_with_depth.permute(\n 0, 3, 1, 4, 2).contiguous() # [n, c, d, h, w] -> [n, h, c, w, d]\n n, h, c, w, d = img_feat_with_depth.shape\n img_feat_with_depth = img_feat_with_depth.view(-1, c, w, d)\n img_feat_with_depth = (\n self.view_aggregation_net(img_feat_with_depth).view(\n n, h, c//2, w, d).permute(0, 2, 4, 1, 3).contiguous().float())\n return img_feat_with_depth\n\n def _forward_depth_net(self, feat, mats_dict):\n return self.depth_net(feat, mats_dict)\n\n def _split_batch_cam(self, feat, inv=False, num_cams=6):\n batch_size = feat.shape[0]\n if not inv:\n return feat.reshape(batch_size // num_cams, num_cams, *feat.shape[1:])\n else:\n return feat.reshape(batch_size * num_cams, *feat.shape[2:])\n\n def _forward_single_sweep(self,\n sweep_index,\n sweep_imgs,\n mats_dict,\n pts_context,\n pts_occupancy,\n return_depth=False):\n \"\"\"Forward function for single sweep.\n\n Args:\n sweep_index (int): Index of sweeps.\n sweep_imgs (Tensor): Input images.\n mats_dict (dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego.\n intrin_mats(Tensor): Intrinsic 
matrix.\n ida_mats(Tensor): Transformation matrix for ida.\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera.\n bda_mat(Tensor): Rotation matrix for bda.\n ptss_context(Tensor): Input point context feature.\n ptss_occupancy(Tensor): Input point occupancy.\n return_depth (bool, optional): Whether to return depth.\n Default: False.\n\n Returns:\n Tensor: BEV feature map.\n \"\"\"\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t4 = torch.cuda.Event(enable_timing=True)\n t5 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n batch_size, num_sweeps, num_cams, num_channels, img_height, \\\n img_width = sweep_imgs.shape\n\n # extract image feature\n img_feats = self.get_cam_feats(sweep_imgs)\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['img_backbone'].append(t1.elapsed_time(t2))\n\n source_features = img_feats[:, 0, ...]\n source_features = self._split_batch_cam(source_features, inv=True, num_cams=num_cams)\n\n # predict image context feature, depth distribution\n depth_feature = self._forward_depth_net(\n source_features,\n mats_dict,\n )\n if self.times is not None:\n t3.record()\n torch.cuda.synchronize()\n self.times['img_dep'].append(t2.elapsed_time(t3))\n\n image_feature = depth_feature[:, self.depth_channels:(self.depth_channels + self.output_channels)]\n\n depth_occupancy = depth_feature[:, :self.depth_channels].softmax(\n dim=1, dtype=depth_feature.dtype)\n img_feat_with_depth = depth_occupancy.unsqueeze(1) * image_feature.unsqueeze(2)\n\n # calculate frustum grid within valid height\n geom_xyz, geom_xyz_valid = self.get_geometry_collapsed(\n mats_dict['sensor2ego_mats'][:, sweep_index, ...],\n mats_dict['intrin_mats'][:, sweep_index, ...],\n mats_dict['ida_mats'][:, sweep_index, ...],\n mats_dict.get('bda_mat', None))\n\n geom_xyz_valid = self._split_batch_cam(geom_xyz_valid, inv=True, num_cams=num_cams).unsqueeze(1)\n img_feat_with_depth = (img_feat_with_depth * geom_xyz_valid).sum(3).unsqueeze(3)\n\n if self.radar_view_transform:\n radar_occupancy = pts_occupancy.permute(0, 2, 1, 3).contiguous()\n image_feature_collapsed = (image_feature * geom_xyz_valid.max(2).values).sum(2).unsqueeze(2)\n img_feat_with_radar = radar_occupancy.unsqueeze(1) * image_feature_collapsed.unsqueeze(2)\n\n img_context = torch.cat([img_feat_with_depth, img_feat_with_radar], dim=1)\n img_context = self._forward_view_aggregation_net(img_context)\n else:\n img_context = img_feat_with_depth\n if self.times is not None:\n t4.record()\n torch.cuda.synchronize()\n self.times['img_transform'].append(t3.elapsed_time(t4))\n\n img_context = self._split_batch_cam(img_context, num_cams=num_cams)\n img_context = img_context.permute(0, 1, 3, 4, 5, 2).contiguous()\n\n pts_context = self._split_batch_cam(pts_context, num_cams=num_cams)\n pts_context = pts_context.unsqueeze(-2).permute(0, 1, 3, 4, 5, 2).contiguous()\n\n fused_context = torch.cat([img_context, pts_context], dim=-1)\n\n geom_xyz = ((geom_xyz - (self.voxel_coord - self.voxel_size / 2.0)) /\n self.voxel_size).int()\n geom_xyz[..., 2] = 0 # collapse z-axis\n geo_pos = torch.ones_like(geom_xyz)\n \n # sparse voxel pooling\n feature_map, _ = average_voxel_pooling(geom_xyz, fused_context.contiguous(), geo_pos,\n self.voxel_num.cuda())\n if self.times is not None:\n t5.record()\n torch.cuda.synchronize()\n 
self.times['img_pool'].append(t4.elapsed_time(t5))\n\n if return_depth:\n return feature_map.contiguous(), depth_feature[:, :self.depth_channels].softmax(1)\n return feature_map.contiguous()\n\n def forward(self,\n sweep_imgs,\n mats_dict,\n ptss_context,\n ptss_occupancy,\n times=None,\n return_depth=False):\n \"\"\"Forward function.\n\n Args:\n sweep_imgs(Tensor): Input images with shape of (B, num_sweeps,\n num_cameras, 3, H, W).\n mats_dict(dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego with shape of (B, num_sweeps,\n num_cameras, 4, 4).\n intrin_mats(Tensor): Intrinsic matrix with shape\n of (B, num_sweeps, num_cameras, 4, 4).\n ida_mats(Tensor): Transformation matrix for ida with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n bda_mat(Tensor): Rotation matrix for bda with shape\n of (B, 4, 4).\n ptss_context(Tensor): Input point context feature with shape of\n (B * num_cameras, num_sweeps, C, D, W).\n ptss_occupancy(Tensor): Input point occupancy with shape of\n (B * num_cameras, num_sweeps, 1, D, W).\n times(Dict, optional): Inference time measurement.\n is_return_depth (bool, optional): Whether to return depth.\n Default: False.\n\n Return:\n Tensor: bev feature map.\n \"\"\"\n self.times = times\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n batch_size, num_sweeps, num_cams, num_channels, img_height, \\\n img_width = sweep_imgs.shape\n key_frame_res = self._forward_single_sweep(\n 0,\n sweep_imgs[:, 0:1, ...],\n mats_dict,\n ptss_context[:, 0, ...] if ptss_context is not None else None,\n ptss_occupancy[:, 0, ...] if ptss_occupancy is not None else None,\n return_depth=return_depth)\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['img'].append(t1.elapsed_time(t2))\n\n if num_sweeps == 1:\n if return_depth:\n return key_frame_res[0].unsqueeze(1), key_frame_res[1], self.times\n else:\n return key_frame_res.unsqueeze(1), self.times\n\n key_frame_feature = key_frame_res[0] if return_depth else key_frame_res\n ret_feature_list = [key_frame_feature]\n for sweep_index in range(1, num_sweeps):\n with torch.no_grad():\n feature_map = self._forward_single_sweep(\n sweep_index,\n sweep_imgs[:, sweep_index:sweep_index + 1, ...],\n mats_dict,\n ptss_context[:, sweep_index, ...] if ptss_context is not None else None,\n ptss_occupancy[:, sweep_index, ...] if ptss_occupancy is not None else None,\n return_depth=False)\n ret_feature_list.append(feature_map)\n\n if return_depth:\n return torch.stack(ret_feature_list, 1), key_frame_res[1], self.times\n else:\n return torch.stack(ret_feature_list, 1), self.times" }, { "identifier": "PtsBackbone", "path": "layers/backbones/pts_backbone.py", "snippet": "class PtsBackbone(nn.Module):\n \"\"\"Pillar Feature Net.\n\n The network prepares the pillar features and performs forward pass\n through PFNLayers.\n\n Args:\n in_channels (int, optional): Number of input features,\n either x, y, z or x, y, z, r. Defaults to 4.\n feat_channels (tuple, optional): Number of features in each of the\n N PFNLayers. Defaults to (64, ).\n with_distance (bool, optional): Whether to include Euclidean distance\n to points. Defaults to False.\n with_cluster_center (bool, optional): [description]. 
Defaults to True.\n with_voxel_center (bool, optional): [description]. Defaults to True.\n voxel_size (tuple[float], optional): Size of voxels, only utilize x\n and y size. Defaults to (0.2, 0.2, 4).\n point_cloud_range (tuple[float], optional): Point cloud range, only\n utilizes x and y min. Defaults to (0, -40, -3, 70.4, 40, 1).\n norm_cfg ([type], optional): [description].\n Defaults to dict(type='BN1d', eps=1e-3, momentum=0.01).\n mode (str, optional): The mode to gather point features. Options are\n 'max' or 'avg'. Defaults to 'max'.\n legacy (bool, optional): Whether to use the new behavior or\n the original behavior. Defaults to True.\n \"\"\"\n\n def __init__(self,\n pts_voxel_layer,\n pts_voxel_encoder,\n pts_middle_encoder,\n pts_backbone,\n pts_neck,\n return_context=True,\n return_occupancy=True,\n **kwargs,\n ):\n super(PtsBackbone, self).__init__()\n\n self.pts_voxel_layer = Voxelization(**pts_voxel_layer)\n self.pts_voxel_encoder = builder.build_voxel_encoder(pts_voxel_encoder)\n self.pts_middle_encoder = builder.build_middle_encoder(pts_middle_encoder)\n self.pts_backbone = builder.build_backbone(pts_backbone)\n self.return_context = return_context\n self.return_occupancy = return_occupancy\n mid_channels = pts_backbone['out_channels'][-1]\n if pts_neck is not None:\n self.pts_neck = builder.build_neck(pts_neck)\n mid_channels = sum(pts_neck['out_channels'])\n else:\n self.pts_neck = None\n\n if self.return_context:\n if 'out_channels_pts' in kwargs:\n out_channels = kwargs['out_channels_pts']\n else:\n out_channels = 80\n self.pred_context = nn.Sequential(\n nn.Conv2d(mid_channels,\n mid_channels//2,\n kernel_size=3,\n stride=1,\n padding=1,\n padding_mode='reflect'),\n nn.BatchNorm2d(mid_channels//2),\n nn.ReLU(inplace=True),\n nn.Conv2d(mid_channels//2,\n out_channels,\n kernel_size=1,\n stride=1,\n padding=0),\n )\n\n if self.return_occupancy:\n self.pred_occupancy = nn.Sequential(\n nn.Conv2d(mid_channels,\n mid_channels//2,\n kernel_size=3,\n stride=1,\n padding=1,\n padding_mode='reflect'),\n nn.BatchNorm2d(mid_channels//2),\n nn.ReLU(inplace=True),\n nn.Conv2d(mid_channels//2,\n 1,\n kernel_size=1,\n stride=1,\n padding=0),\n )\n\n if 'occupancy_init' in kwargs:\n occupancy_init = kwargs['occupancy_init']\n else:\n occupancy_init = 0.01\n self.pred_occupancy[-1].bias.data.fill_(bias_init_with_prob(occupancy_init))\n\n def voxelize(self, points):\n \"\"\"Apply dynamic voxelization to points.\n\n Args:\n points (list[torch.Tensor]): Points of each sample.\n\n Returns:\n tuple[torch.Tensor]: Concatenated points, number of points\n per voxel, and coordinates.\n \"\"\"\n voxels, coors, num_points = [], [], []\n batch_size, _, _ = points.shape\n points_list = [points[i] for i in range(batch_size)]\n\n for res in points_list:\n res_voxels, res_coors, res_num_points = self.pts_voxel_layer(res)\n voxels.append(res_voxels)\n coors.append(res_coors)\n num_points.append(res_num_points)\n voxels = torch.cat(voxels, dim=0)\n num_points = torch.cat(num_points, dim=0)\n coors_batch = []\n for i, coor in enumerate(coors):\n coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)\n coors_batch.append(coor_pad)\n coors_batch = torch.cat(coors_batch, dim=0)\n return voxels, num_points, coors_batch\n\n def _forward_single_sweep(self, pts):\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t4 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n 
B, N, P, F = pts.shape\n batch_size = B * N\n pts = pts.contiguous().view(B*N, P, F)\n\n voxels, num_points, coors = self.voxelize(pts)\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['pts_voxelize'].append(t1.elapsed_time(t2))\n\n voxel_features = self.pts_voxel_encoder(voxels, num_points, coors)\n x = self.pts_middle_encoder(voxel_features, coors, batch_size)\n x = self.pts_backbone(x)\n if self.pts_neck is not None:\n x = self.pts_neck(x)\n\n if self.times is not None:\n t3.record()\n torch.cuda.synchronize()\n self.times['pts_backbone'].append(t2.elapsed_time(t3))\n\n x_context = None\n x_occupancy = None\n if self.return_context:\n x_context = self.pred_context(x[-1]).unsqueeze(1)\n if self.return_occupancy:\n x_occupancy = self.pred_occupancy(x[-1]).unsqueeze(1).sigmoid()\n\n if self.times is not None:\n t4.record()\n torch.cuda.synchronize()\n self.times['pts_head'].append(t3.elapsed_time(t4))\n\n return x_context, x_occupancy\n\n def forward(self, ptss, times=None):\n self.times = times\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n batch_size, num_sweeps, num_cams, _, _ = ptss.shape\n\n key_context, key_occupancy = self._forward_single_sweep(ptss[:, 0, ...])\n \n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['pts'].append(t1.elapsed_time(t2))\n\n if num_sweeps == 1:\n return key_context, key_occupancy, self.times\n\n context_list = [key_context]\n occupancy_list = [key_occupancy]\n for sweep_index in range(1, num_sweeps):\n with torch.no_grad():\n context, occupancy = self._forward_single_sweep(ptss[:, sweep_index, ...])\n context_list.append(context)\n occupancy_list.append(occupancy)\n\n ret_context = None\n ret_occupancy = None\n if self.return_context:\n ret_context = torch.cat(context_list, 1)\n if self.return_occupancy:\n ret_occupancy = torch.cat(occupancy_list, 1)\n return ret_context, ret_occupancy, self.times" }, { "identifier": "MFAFuser", "path": "layers/fuser/multimodal_feature_aggregation.py", "snippet": "class MFAFuser(nn.Module):\n def __init__(self, num_sweeps=4, img_dims=80, pts_dims=128, embed_dims=256,\n num_layers=6, num_heads=4, bev_shape=(128, 128)):\n super(MFAFuser, self).__init__()\n\n self.num_modalities = 2\n self.use_cams_embeds = False\n\n self.num_heads = num_heads\n\n self.img_dims = img_dims\n self.pts_dims = pts_dims\n self.embed_dims = embed_dims\n _pos_dim_ = self.embed_dims//2\n _ffn_dim_ = self.embed_dims*2\n\n self.norm_img = build_norm_layer(dict(type='LN'), img_dims)[1]\n self.norm_pts = build_norm_layer(dict(type='LN'), pts_dims)[1]\n self.input_proj = nn.Linear(img_dims + pts_dims, self.embed_dims)\n\n self.bev_h, self.bev_w = bev_shape\n\n self.positional_encoding = build_positional_encoding(\n dict(\n type='LearnedPositionalEncoding',\n num_feats=_pos_dim_,\n row_num_embed=self.bev_h,\n col_num_embed=self.bev_w,\n ),\n )\n self.register_buffer('ref_2d', self.get_reference_points(self.bev_h, self.bev_w))\n\n ffn_cfgs = dict(\n type='FFN',\n embed_dims=self.embed_dims,\n feedforward_channels=_ffn_dim_,\n num_fcs=2,\n ffn_drop=0.1,\n act_cfg=dict(type='ReLU', inplace=True),\n )\n norm_cfgs = dict(type='LN')\n\n self.ffn_layers = ModuleList()\n for _ in range(num_layers):\n self.ffn_layers.append(\n build_feedforward_network(ffn_cfgs)\n )\n self.norm_layers1 = ModuleList()\n for _ in range(num_layers):\n self.norm_layers1.append(\n build_norm_layer(norm_cfgs, 
self.embed_dims)[1],\n )\n self.norm_layers2 = ModuleList()\n for _ in range(num_layers):\n self.norm_layers2.append(\n build_norm_layer(norm_cfgs, self.embed_dims)[1],\n )\n self.attn_layers = ModuleList()\n for _ in range(num_layers):\n self.attn_layers.append(\n DeformableCrossAttention(\n img_dims=self.img_dims,\n pts_dims=self.pts_dims,\n embed_dims=self.embed_dims,\n num_heads=self.num_heads,\n num_modalities=self.num_modalities,\n num_points=4\n ),\n )\n\n self.reduce_conv = nn.Sequential(\n nn.Conv2d(embed_dims*num_sweeps,\n embed_dims,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False),\n nn.BatchNorm2d(embed_dims),\n nn.ReLU(inplace=True),\n )\n\n self.init_weights()\n\n def init_weights(self):\n \"\"\"Initialize the transformer weights.\"\"\"\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n for m in self.modules():\n if isinstance(m, DeformableCrossAttention):\n try:\n m.init_weight()\n except AttributeError:\n m.init_weights()\n\n @staticmethod\n def get_reference_points(H, W, dtype=torch.float):\n \"\"\"Get the reference points used in SCA and TSA.\n Args:\n H, W: spatial shape of bev.\n Z: hight of pillar.\n D: sample D points uniformly from each pillar.\n device (obj:`device`): The device where\n reference_points should be.\n Returns:\n Tensor: reference points used in decoder, has \\\n shape (bs, num_keys, num_levels, 2).\n \"\"\"\n ref_y, ref_x = torch.meshgrid(\n torch.linspace(\n 0.5, H - 0.5, H, dtype=dtype),\n torch.linspace(\n 0.5, W - 0.5, W, dtype=dtype)\n )\n ref_y = ref_y.reshape(-1)[None] / H\n ref_x = ref_x.reshape(-1)[None] / W\n ref_2d = torch.stack((ref_x, ref_y), -1)\n ref_2d = ref_2d.unsqueeze(2).unsqueeze(3)\n return ref_2d\n\n @auto_fp16(apply_to=('feat_img', 'feat_pts'))\n def _forward_single_sweep(self, feat_img, feat_pts):\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t4 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n bs = feat_img.shape[0]\n ref_2d_stack = self.ref_2d.repeat(bs, 1, 1, self.num_modalities, 1)\n\n feat_img = self.norm_img(feat_img.permute(0, 2, 3, 1).contiguous()).permute(0, 3, 1, 2).contiguous()\n feat_pts = self.norm_pts(feat_pts.permute(0, 2, 3, 1).contiguous()).permute(0, 3, 1, 2).contiguous()\n\n feat_flatten = []\n spatial_shapes = []\n for feat in [feat_img, feat_pts]:\n _, _, h, w = feat.shape\n spatial_shape = (h, w)\n feat = feat.flatten(2).permute(0, 2, 1).contiguous() # [bs, num_cam, c, dw] -> [num_cam, bs, dw, c]\n spatial_shapes.append(spatial_shape)\n feat_flatten.append(feat)\n\n spatial_shapes = torch.as_tensor(\n spatial_shapes, dtype=torch.long, device=feat_img.device)\n level_start_index = torch.cat((spatial_shapes.new_zeros(\n (1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))\n\n bev_queries = torch.cat(feat_flatten, -1)\n bev_queries = self.input_proj(bev_queries)\n\n bev_mask = torch.zeros((bs, self.bev_h, self.bev_w),\n device=bev_queries.device).to(feat_img.dtype)\n bev_pos = self.positional_encoding(bev_mask).to(feat_img.dtype)\n bev_pos = bev_pos.flatten(2).permute(0, 2, 1).contiguous()\n\n feat_img = feat_flatten[0]\n feat_pts = feat_flatten[1]\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['fusion_pre'].append(t1.elapsed_time(t2))\n\n for attn_layer, ffn_layer, norm_layer1, norm_layer2 in \\\n zip(self.attn_layers, self.ffn_layers, self.norm_layers1, self.norm_layers2):\n # post norm\n 
bev_queries = attn_layer(\n bev_queries,\n feat_img,\n feat_pts,\n identity=None,\n query_pos=bev_pos,\n reference_points=ref_2d_stack,\n spatial_shapes=spatial_shapes,\n level_start_index=level_start_index,\n )\n bev_queries = norm_layer1(bev_queries)\n bev_queries = ffn_layer(bev_queries, identity=None)\n bev_queries = norm_layer2(bev_queries)\n if self.times is not None:\n t3.record()\n torch.cuda.synchronize()\n self.times['fusion_layer'].append(t2.elapsed_time(t3))\n\n output = bev_queries.permute(0, 2, 1).contiguous().reshape(bs, self.embed_dims, h, w)\n if self.times is not None:\n t4.record()\n torch.cuda.synchronize()\n self.times['fusion_post'].append(t3.elapsed_time(t4))\n\n return output\n\n def forward(self, feats, times=None):\n self.times = times\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n num_sweeps = feats.shape[1]\n key_frame_res = self._forward_single_sweep(\n feats[:, 0, :self.img_dims],\n feats[:, 0, self.img_dims:self.img_dims+self.pts_dims]\n )\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['fusion'].append(t1.elapsed_time(t2))\n\n if num_sweeps == 1:\n return key_frame_res, self.times\n\n ret_feature_list = [key_frame_res]\n for sweep_index in range(1, num_sweeps):\n with torch.no_grad():\n feature_map = self._forward_single_sweep(\n feats[:, sweep_index, :self.img_dims],\n feats[:, sweep_index, self.img_dims:self.img_dims+self.pts_dims])\n ret_feature_list.append(feature_map)\n\n return self.reduce_conv(torch.cat(ret_feature_list, 1)).float(), self.times" }, { "identifier": "BEVDepthHead", "path": "layers/heads/bev_depth_head_det.py", "snippet": "class BEVDepthHead(CenterHead):\n \"\"\"Head for BevDepth.\n\n Args:\n in_channels(int): Number of channels after bev_neck.\n tasks(dict): Tasks for head.\n bbox_coder(dict): Config of bbox coder.\n common_heads(dict): Config of head for each task.\n loss_cls(dict): Config of classification loss.\n loss_bbox(dict): Config of regression loss.\n gaussian_overlap(float): Gaussian overlap used for `get_targets`.\n min_radius(int): Min radius used for `get_targets`.\n train_cfg(dict): Config used in the training process.\n test_cfg(dict): Config used in the test process.\n bev_backbone_conf(dict): Cnfig of bev_backbone.\n bev_neck_conf(dict): Cnfig of bev_neck.\n \"\"\"\n def __init__(\n self,\n in_channels=256,\n tasks=None,\n bbox_coder=None,\n common_heads=dict(),\n loss_cls=dict(type='GaussianFocalLoss', reduction='mean'),\n loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25),\n gaussian_overlap=0.1,\n min_radius=2,\n train_cfg=None,\n test_cfg=None,\n bev_backbone_conf=bev_backbone_conf,\n bev_neck_conf=bev_neck_conf,\n separate_head=dict(type='SeparateHead',\n init_bias=-2.19,\n final_kernel=3),\n ):\n super(BEVDepthHead, self).__init__(\n in_channels=in_channels,\n tasks=tasks,\n bbox_coder=bbox_coder,\n common_heads=common_heads,\n loss_cls=loss_cls,\n loss_bbox=loss_bbox,\n separate_head=separate_head,\n )\n self.trunk = build_backbone(bev_backbone_conf)\n self.trunk.init_weights()\n self.neck = build_neck(bev_neck_conf)\n self.neck.init_weights()\n del self.trunk.maxpool\n self.gaussian_overlap = gaussian_overlap\n self.min_radius = min_radius\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n @autocast(False)\n def forward(self, x, times=None):\n \"\"\"Forward pass.\n\n Args:\n x (list[torch.Tensor]): Multi-level features, e.g.,\n features 
produced by FPN.\n\n Returns:\n tuple(list[dict]): Output results for tasks.\n \"\"\"\n if times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n # FPN\n trunk_outs = [x]\n if self.trunk.deep_stem:\n x = self.trunk.stem(x)\n else:\n x = self.trunk.conv1(x)\n x = self.trunk.norm1(x)\n x = self.trunk.relu(x)\n for i, layer_name in enumerate(self.trunk.res_layers):\n res_layer = getattr(self.trunk, layer_name)\n x = res_layer(x)\n if i in self.trunk.out_indices:\n trunk_outs.append(x)\n fpn_output = self.neck(trunk_outs)\n\n if times is not None:\n t2.record()\n torch.cuda.synchronize()\n times['head_backbone'].append(t1.elapsed_time(t2))\n\n ret_values = super().forward(fpn_output)\n\n if times is not None:\n t3.record()\n torch.cuda.synchronize()\n times['head_head'].append(t2.elapsed_time(t3))\n times['head'].append(t1.elapsed_time(t3))\n\n return ret_values, times\n\n def get_targets_single(self, gt_bboxes_3d, gt_labels_3d):\n \"\"\"Generate training targets for a single sample.\n\n Args:\n gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n gt_labels_3d (torch.Tensor): Labels of boxes.\n\n Returns:\n tuple[list[torch.Tensor]]: Tuple of target including \\\n the following results in order.\n\n - list[torch.Tensor]: Heatmap scores.\n - list[torch.Tensor]: Ground truth boxes.\n - list[torch.Tensor]: Indexes indicating the position \\\n of the valid boxes.\n - list[torch.Tensor]: Masks indicating which boxes \\\n are valid.\n \"\"\"\n max_objs = self.train_cfg['max_objs'] * self.train_cfg['dense_reg']\n grid_size = torch.tensor(self.train_cfg['grid_size'])\n pc_range = torch.tensor(self.train_cfg['point_cloud_range'])\n voxel_size = torch.tensor(self.train_cfg['voxel_size'])\n\n feature_map_size = grid_size[:2] // self.train_cfg['out_size_factor']\n\n # reorganize the gt_dict by tasks\n task_masks = []\n flag = 0\n for class_name in self.class_names:\n task_masks.append([\n torch.where(gt_labels_3d == class_name.index(i) + flag)\n for i in class_name\n ])\n flag += len(class_name)\n\n task_boxes = []\n task_classes = []\n flag2 = 0\n for idx, mask in enumerate(task_masks):\n task_box = []\n task_class = []\n for m in mask:\n task_box.append(gt_bboxes_3d[m])\n # 0 is background for each task, so we need to add 1 here.\n task_class.append(gt_labels_3d[m] + 1 - flag2)\n task_boxes.append(\n torch.cat(task_box, axis=0).to(gt_bboxes_3d.device))\n task_classes.append(\n torch.cat(task_class).long().to(gt_bboxes_3d.device))\n flag2 += len(mask)\n draw_gaussian = draw_heatmap_gaussian\n heatmaps, anno_boxes, inds, masks = [], [], [], []\n\n for idx, task_head in enumerate(self.task_heads):\n heatmap = gt_bboxes_3d.new_zeros(\n (len(self.class_names[idx]), feature_map_size[1],\n feature_map_size[0]),\n device='cuda')\n\n anno_box = gt_bboxes_3d.new_zeros((max_objs, 10),\n dtype=torch.float32,\n device='cuda')\n\n ind = gt_labels_3d.new_zeros((max_objs),\n dtype=torch.int64,\n device='cuda')\n mask = gt_bboxes_3d.new_zeros((max_objs),\n dtype=torch.uint8,\n device='cuda')\n\n num_objs = min(task_boxes[idx].shape[0], max_objs)\n\n for k in range(num_objs):\n cls_id = task_classes[idx][k] - 1\n\n width = task_boxes[idx][k][3]\n length = task_boxes[idx][k][4]\n width = width / voxel_size[0] / self.train_cfg[\n 'out_size_factor']\n length = length / voxel_size[1] / self.train_cfg[\n 'out_size_factor']\n\n if width > 0 and length > 0:\n radius = 
gaussian_radius(\n (length, width),\n min_overlap=self.train_cfg['gaussian_overlap'])\n radius = max(self.train_cfg['min_radius'], int(radius))\n\n # be really careful for the coordinate system of\n # your box annotation.\n x, y, z = task_boxes[idx][k][0], task_boxes[idx][k][\n 1], task_boxes[idx][k][2]\n\n coor_x = (\n x - pc_range[0]\n ) / voxel_size[0] / self.train_cfg['out_size_factor']\n coor_y = (\n y - pc_range[1]\n ) / voxel_size[1] / self.train_cfg['out_size_factor']\n\n center = torch.tensor([coor_x, coor_y],\n dtype=torch.float32,\n device='cuda')\n center_int = center.to(torch.int32)\n\n # throw out not in range objects to avoid out of array\n # area when creating the heatmap\n if not (0 <= center_int[0] < feature_map_size[0]\n and 0 <= center_int[1] < feature_map_size[1]):\n continue\n\n draw_gaussian(heatmap[cls_id], center_int, radius)\n\n new_idx = k\n x, y = center_int[0], center_int[1]\n\n assert y * feature_map_size[0] + x < feature_map_size[\n 0] * feature_map_size[1]\n\n ind[new_idx] = y * feature_map_size[0] + x\n mask[new_idx] = 1\n\n vx, vy = task_boxes[idx][k][7:]\n rot = task_boxes[idx][k][6]\n box_dim = task_boxes[idx][k][3:6]\n if self.norm_bbox:\n box_dim = box_dim.log()\n anno_box[new_idx] = torch.cat([\n center - torch.tensor([x, y], device='cuda'),\n z.unsqueeze(0),\n box_dim,\n torch.sin(rot).unsqueeze(0),\n torch.cos(rot).unsqueeze(0),\n vx.unsqueeze(0),\n vy.unsqueeze(0),\n ])\n\n heatmaps.append(heatmap)\n anno_boxes.append(anno_box)\n masks.append(mask)\n inds.append(ind)\n return heatmaps, anno_boxes, inds, masks\n\n def loss(self, targets, preds_dicts, **kwargs):\n \"\"\"Loss function for BEVDepthHead.\n\n Args:\n gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground\n truth gt boxes.\n gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n preds_dicts (dict): Output of forward function.\n\n Returns:\n dict[str:torch.Tensor]: Loss of heatmap and bbox of each task.\n \"\"\"\n heatmaps, anno_boxes, inds, masks = targets\n return_loss = 0\n return_loss_heatmap, return_loss_bbox = 0, 0\n for task_id, preds_dict in enumerate(preds_dicts):\n # heatmap focal loss\n preds_dict[0]['heatmap'] = clip_sigmoid(preds_dict[0]['heatmap'])\n num_pos = heatmaps[task_id].eq(1).float().sum().item()\n cls_avg_factor = torch.clamp(reduce_mean(\n heatmaps[task_id].new_tensor(num_pos)),\n min=1).item()\n loss_heatmap = self.loss_cls(preds_dict[0]['heatmap'],\n heatmaps[task_id],\n avg_factor=cls_avg_factor)\n target_box = anno_boxes[task_id]\n # reconstruct the anno_box from multiple reg heads\n preds_dict[0]['anno_box'] = torch.cat(\n (\n preds_dict[0]['reg'],\n preds_dict[0]['height'],\n preds_dict[0]['dim'],\n preds_dict[0]['rot'],\n preds_dict[0]['vel'],\n ),\n dim=1,\n )\n\n # Regression loss for dimension, offset, height, rotation\n num = masks[task_id].float().sum()\n ind = inds[task_id]\n pred = preds_dict[0]['anno_box'].permute(0, 2, 3, 1).contiguous()\n pred = pred.view(pred.size(0), -1, pred.size(3))\n pred = self._gather_feat(pred, ind)\n mask = masks[task_id].unsqueeze(2).expand_as(target_box).float()\n num = torch.clamp(reduce_mean(target_box.new_tensor(num)),\n min=1e-4).item()\n isnotnan = (~torch.isnan(target_box)).float()\n mask *= isnotnan\n code_weights = self.train_cfg['code_weights']\n bbox_weights = mask * mask.new_tensor(code_weights)\n loss_bbox = self.loss_bbox(pred,\n target_box,\n bbox_weights,\n avg_factor=num)\n return_loss += loss_bbox\n return_loss += loss_heatmap\n return_loss_bbox += loss_bbox\n return_loss_heatmap += loss_heatmap\n return 
return_loss, return_loss_heatmap, return_loss_bbox" } ]
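For reference, the target assignment in the get_targets_single snippet above maps each ground-truth box center onto the BEV heatmap grid. With point-cloud range origin (r_x, r_y), voxel sizes (\Delta_x, \Delta_y), output stride s and feature-map width W (notation introduced here only to summarize the code), it computes

\[ c_x = \frac{x - r_x}{\Delta_x\, s}, \qquad c_y = \frac{y - r_y}{\Delta_y\, s}, \qquad \mathrm{ind} = \lfloor c_y \rfloor \, W + \lfloor c_x \rfloor , \]

and stores in anno_box the sub-pixel offset (c_x - \lfloor c_x \rfloor,\; c_y - \lfloor c_y \rfloor) together with the height z, the (optionally log-scaled) box dimensions, (\sin\theta, \cos\theta) of the yaw, and the velocity components, exactly in the order the snippet concatenates them.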
import mmcv
from models.base_bev_depth import BaseBEVDepth
from layers.backbones.rvt_lss_fpn import RVTLSSFPN
from layers.backbones.pts_backbone import PtsBackbone
from layers.fuser.multimodal_feature_aggregation import MFAFuser
from layers.heads.bev_depth_head_det import BEVDepthHead
11,670
logger = mmcv.utils.get_logger('mmdet')
logger.setLevel('WARNING')
__all__ = ['CameraRadarNetDet']
logger = mmcv.utils.get_logger('mmdet')
logger.setLevel('WARNING')
__all__ = ['CameraRadarNetDet']
class CameraRadarNetDet(BaseBEVDepth):
0
2023-12-06 14:57:49+00:00
16k
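Read together, the fields of a record like the one above form a retrieval-augmented next-line completion example: the context snippets supply cross-file definitions, import_statement and cropped_code give the in-file prefix, and next_line is the reference continuation. A minimal sketch of how such a record could be assembled into a prompt/target pair is given below; the helper name and the prompt layout are illustrative assumptions, only the field and snippet keys ('context', 'path', 'snippet', 'import_statement', 'cropped_code', 'next_line') come from the record itself.

def build_completion_example(record):
    # Hypothetical helper: the prompt layout below is an assumption, not part of the dataset.
    # Cross-file context: one block per retrieved snippet, prefixed with its source path.
    context_block = "\n\n".join(
        "# {}\n{}".format(item["path"], item["snippet"]) for item in record["context"]
    )
    # The model is asked to continue cropped_code; next_line is the reference answer.
    prompt = "{}\n\n{}\n\n{}\n".format(
        context_block, record["import_statement"], record["cropped_code"]
    )
    return prompt, record["next_line"]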
LIU-Yuxin/SyncMVD
src/pipeline.py
[ { "identifier": "UVProjection", "path": "src/renderer/project.py", "snippet": "class UVProjection():\n\tdef __init__(self, texture_size=96, render_size=64, sampling_mode=\"nearest\", channels=3, device=None):\n\t\tself.channels = channels\n\t\tself.device = device or torch.device(\"cpu\")\n\t\tself.lights = AmbientLights(ambient_color=((1.0,)*channels,), device=self.device)\n\t\tself.target_size = (texture_size,texture_size)\n\t\tself.render_size = render_size\n\t\tself.sampling_mode = sampling_mode\n\n\n\t# Load obj mesh, rescale the mesh to fit into the bounding box\n\tdef load_mesh(self, mesh_path, scale_factor=2.0, auto_center=True, autouv=False):\n\t\tmesh = load_objs_as_meshes([mesh_path], device=self.device)\n\t\tif auto_center:\n\t\t\tverts = mesh.verts_packed()\n\t\t\tmax_bb = (verts - 0).max(0)[0]\n\t\t\tmin_bb = (verts - 0).min(0)[0]\n\t\t\tscale = (max_bb - min_bb).max()/2\n\t\t\tcenter = (max_bb+min_bb) /2\n\t\t\tmesh.offset_verts_(-center)\n\t\t\tmesh.scale_verts_((scale_factor / float(scale)))\t\t\n\t\telse:\n\t\t\tmesh.scale_verts_((scale_factor))\n\n\t\tif autouv or (mesh.textures is None):\n\t\t\tmesh = self.uv_unwrap(mesh)\n\t\tself.mesh = mesh\n\n\n\tdef load_glb_mesh(self, mesh_path, scale_factor=2.0, auto_center=True, autouv=False):\n\t\tfrom pytorch3d.io.experimental_gltf_io import MeshGlbFormat\n\t\tio = IO()\n\t\tio.register_meshes_format(MeshGlbFormat())\n\t\twith open(mesh_path, \"rb\") as f:\n\t\t\tmesh = io.load_mesh(f, include_textures=True, device=self.device)\n\t\tif auto_center:\n\t\t\tverts = mesh.verts_packed()\n\t\t\tmax_bb = (verts - 0).max(0)[0]\n\t\t\tmin_bb = (verts - 0).min(0)[0]\n\t\t\tscale = (max_bb - min_bb).max()/2 \n\t\t\tcenter = (max_bb+min_bb) /2\n\t\t\tmesh.offset_verts_(-center)\n\t\t\tmesh.scale_verts_((scale_factor / float(scale)))\n\t\telse:\n\t\t\tmesh.scale_verts_((scale_factor))\n\t\tif autouv or (mesh.textures is None):\n\t\t\tmesh = self.uv_unwrap(mesh)\n\t\tself.mesh = mesh\n\n\n\t# Save obj mesh\n\tdef save_mesh(self, mesh_path, texture):\n\t\tsave_obj(mesh_path, \n\t\t\t\tself.mesh.verts_list()[0],\n\t\t\t\tself.mesh.faces_list()[0],\n\t\t\t\tverts_uvs= self.mesh.textures.verts_uvs_list()[0],\n\t\t\t\tfaces_uvs= self.mesh.textures.faces_uvs_list()[0],\n\t\t\t\ttexture_map=texture)\n\n\t# Code referred to TEXTure code (https://github.com/TEXTurePaper/TEXTurePaper.git)\n\tdef uv_unwrap(self, mesh):\n\t\tverts_list = mesh.verts_list()[0]\n\t\tfaces_list = mesh.faces_list()[0]\n\n\n\t\timport xatlas\n\t\timport numpy as np\n\t\tv_np = verts_list.cpu().numpy()\n\t\tf_np = faces_list.int().cpu().numpy()\n\t\tatlas = xatlas.Atlas()\n\t\tatlas.add_mesh(v_np, f_np)\n\t\tchart_options = xatlas.ChartOptions()\n\t\tchart_options.max_iterations = 4\n\t\tatlas.generate(chart_options=chart_options)\n\t\tvmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n\t\tvt = torch.from_numpy(vt_np.astype(np.float32)).type(verts_list.dtype).to(mesh.device)\n\t\tft = torch.from_numpy(ft_np.astype(np.int64)).type(faces_list.dtype).to(mesh.device)\n\n\t\tnew_map = torch.zeros(self.target_size+(self.channels,), device=mesh.device)\n\t\tnew_tex = TexturesUV(\n\t\t\t[new_map], \n\t\t\t[ft], \n\t\t\t[vt], \n\t\t\tsampling_mode=self.sampling_mode\n\t\t\t)\n\n\t\tmesh.textures = new_tex\n\t\treturn mesh\n\n\n\t'''\n\t\tA functions that disconnect faces in the mesh according to\n\t\tits UV seams. 
The number of vertices are made equal to the\n\t\tnumber of unique vertices its UV layout, while the faces list\n\t\tis intact.\n\t'''\n\tdef disconnect_faces(self):\n\t\tmesh = self.mesh\n\t\tverts_list = mesh.verts_list()\n\t\tfaces_list = mesh.faces_list()\n\t\tverts_uvs_list = mesh.textures.verts_uvs_list()\n\t\tfaces_uvs_list = mesh.textures.faces_uvs_list()\n\t\tpacked_list = [v[f] for v,f in zip(verts_list, faces_list)]\n\t\tverts_disconnect_list = [\n\t\t\ttorch.zeros(\n\t\t\t\t(verts_uvs_list[i].shape[0], 3), \n\t\t\t\tdtype=verts_list[0].dtype, \n\t\t\t\tdevice=verts_list[0].device\n\t\t\t) \n\t\t\tfor i in range(len(verts_list))]\n\t\tfor i in range(len(verts_list)):\n\t\t\tverts_disconnect_list[i][faces_uvs_list] = packed_list[i]\n\t\tassert not mesh.has_verts_normals(), \"Not implemented for vertex normals\"\n\t\tself.mesh_d = Meshes(verts_disconnect_list, faces_uvs_list, mesh.textures)\n\t\treturn self.mesh_d\n\n\n\t'''\n\t\tA function that construct a temp mesh for back-projection.\n\t\tTake a disconnected mesh and a rasterizer, the function calculates\n\t\tthe projected faces as the UV, as use its original UV with pseudo\n\t\tz value as world space geometry.\n\t'''\n\tdef construct_uv_mesh(self):\n\t\tmesh = self.mesh_d\n\t\tverts_list = mesh.verts_list()\n\t\tverts_uvs_list = mesh.textures.verts_uvs_list()\n\t\t# faces_list = [torch.flip(faces, [-1]) for faces in mesh.faces_list()]\n\t\tnew_verts_list = []\n\t\tfor i, (verts, verts_uv) in enumerate(zip(verts_list, verts_uvs_list)):\n\t\t\tverts = verts.clone()\n\t\t\tverts_uv = verts_uv.clone()\n\t\t\tverts[...,0:2] = verts_uv[...,:]\n\t\t\tverts = (verts - 0.5) * 2\n\t\t\tverts[...,2] *= 1\n\t\t\tnew_verts_list.append(verts)\n\t\ttextures_uv = mesh.textures.clone()\n\t\tself.mesh_uv = Meshes(new_verts_list, mesh.faces_list(), textures_uv)\n\t\treturn self.mesh_uv\n\n\n\t# Set texture for the current mesh.\n\tdef set_texture_map(self, texture):\n\t\tnew_map = texture.permute(1, 2, 0)\n\t\tnew_map = new_map.to(self.device)\n\t\tnew_tex = TexturesUV(\n\t\t\t[new_map], \n\t\t\tself.mesh.textures.faces_uvs_padded(), \n\t\t\tself.mesh.textures.verts_uvs_padded(), \n\t\t\tsampling_mode=self.sampling_mode\n\t\t\t)\n\t\tself.mesh.textures = new_tex\n\n\n\t# Set the initial normal noise texture\n\t# No generator here for replication of the experiment result. 
Add one as you wish\n\tdef set_noise_texture(self, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\tnoise_texture = torch.normal(0, 1, (channels,) + self.target_size, device=self.device)\n\t\tself.set_texture_map(noise_texture)\n\t\treturn noise_texture\n\n\n\t# Set the cameras given the camera poses and centers\n\tdef set_cameras(self, camera_poses, centers=None, camera_distance=2.7, scale=None):\n\t\telev = torch.FloatTensor([pose[0] for pose in camera_poses])\n\t\tazim = torch.FloatTensor([pose[1] for pose in camera_poses])\n\t\tR, T = look_at_view_transform(dist=camera_distance, elev=elev, azim=azim, at=centers or ((0,0,0),))\n\t\tself.cameras = FoVOrthographicCameras(device=self.device, R=R, T=T, scale_xyz=scale or ((1,1,1),))\n\n\n\t# Set all necessary internal data for rendering and texture baking\n\t# Can be used to refresh after changing camera positions\n\tdef set_cameras_and_render_settings(self, camera_poses, centers=None, camera_distance=2.7, render_size=None, scale=None):\n\t\tself.set_cameras(camera_poses, centers, camera_distance, scale=scale)\n\t\tif render_size is None:\n\t\t\trender_size = self.render_size\n\t\tif not hasattr(self, \"renderer\"):\n\t\t\tself.setup_renderer(size=render_size)\n\t\tif not hasattr(self, \"mesh_d\"):\n\t\t\tself.disconnect_faces()\n\t\tif not hasattr(self, \"mesh_uv\"):\n\t\t\tself.construct_uv_mesh()\n\t\tself.calculate_tex_gradient()\n\t\tself.calculate_visible_triangle_mask()\n\t\t_,_,_,cos_maps,_, _ = self.render_geometry()\n\t\tself.calculate_cos_angle_weights(cos_maps)\n\n\n\t# Setup renderers for rendering\n\t# max faces per bin set to 30000 to avoid overflow in many test cases.\n\t# You can use default value to let pytorch3d handle that for you.\n\tdef setup_renderer(self, size=64, blur=0.0, face_per_pix=1, perspective_correct=False, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\n\t\tself.raster_settings = RasterizationSettings(\n\t\t\timage_size=size, \n\t\t\tblur_radius=blur, \n\t\t\tfaces_per_pixel=face_per_pix,\n\t\t\tperspective_correct=perspective_correct,\n\t\t\tcull_backfaces=True,\n\t\t\tmax_faces_per_bin=30000,\n\t\t)\n\n\t\tself.renderer = MeshRenderer(\n\t\t\trasterizer=MeshRasterizer(\n\t\t\t\tcameras=self.cameras, \n\t\t\t\traster_settings=self.raster_settings,\n\n\t\t\t),\n\t\t\tshader=HardNChannelFlatShader(\n\t\t\t\tdevice=self.device, \n\t\t\t\tcameras=self.cameras,\n\t\t\t\tlights=self.lights,\n\t\t\t\tchannels=channels\n\t\t\t\t# materials=materials\n\t\t\t)\n\t\t)\n\n\n\t# Bake screen-space cosine weights to UV space\n\t# May be able to reimplement using the generic \"bake_texture\" function, but it works so leave it here for now\n\[email protected]_grad()\n\tdef calculate_cos_angle_weights(self, cos_angles, fill=True, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\tcos_maps = []\n\t\ttmp_mesh = self.mesh.clone()\n\t\tfor i in range(len(self.cameras)):\n\t\t\t\n\t\t\tzero_map = torch.zeros(self.target_size+(channels,), device=self.device, requires_grad=True)\n\t\t\toptimizer = torch.optim.SGD([zero_map], lr=1, momentum=0)\n\t\t\toptimizer.zero_grad()\n\t\t\tzero_tex = TexturesUV([zero_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode)\n\t\t\ttmp_mesh.textures = zero_tex\n\n\t\t\timages_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights)\n\n\t\t\tloss = torch.sum((cos_angles[i,:,:,0:1]**1 - 
images_predicted)**2)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\tif fill:\n\t\t\t\tzero_map = zero_map.detach() / (self.gradient_maps[i] + 1E-8)\n\t\t\t\tzero_map = voronoi_solve(zero_map, self.gradient_maps[i][...,0])\n\t\t\telse:\n\t\t\t\tzero_map = zero_map.detach() / (self.gradient_maps[i]+1E-8)\n\t\t\tcos_maps.append(zero_map)\n\t\tself.cos_maps = cos_maps\n\n\t\t\n\t# Get geometric info from fragment shader\n\t# Can be used for generating conditioning image and cosine weights\n\t# Returns some information you may not need, remember to release them for memory saving\n\[email protected]_grad()\n\tdef render_geometry(self, image_size=None):\n\t\tif image_size:\n\t\t\tsize = self.renderer.rasterizer.raster_settings.image_size\n\t\t\tself.renderer.rasterizer.raster_settings.image_size = image_size\n\t\tshader = self.renderer.shader\n\t\tself.renderer.shader = HardGeometryShader(device=self.device, cameras=self.cameras[0], lights=self.lights)\n\t\ttmp_mesh = self.mesh.clone()\n\t\t\n\t\tverts, normals, depths, cos_angles, texels, fragments = self.renderer(tmp_mesh.extend(len(self.cameras)), cameras=self.cameras, lights=self.lights)\n\t\tself.renderer.shader = shader\n\n\t\tif image_size:\n\t\t\tself.renderer.rasterizer.raster_settings.image_size = size\n\n\t\treturn verts, normals, depths, cos_angles, texels, fragments\n\n\n\t# Project world normal to view space and normalize\n\[email protected]_grad()\n\tdef decode_view_normal(self, normals):\n\t\tw2v_mat = self.cameras.get_full_projection_transform()\n\t\tnormals_view = torch.clone(normals)[:,:,:,0:3]\n\t\tnormals_view = normals_view.reshape(normals_view.shape[0], -1, 3)\n\t\tnormals_view = w2v_mat.transform_normals(normals_view)\n\t\tnormals_view = normals_view.reshape(normals.shape[0:3]+(3,))\n\t\tnormals_view[:,:,:,2] *= -1\n\t\tnormals = (normals_view[...,0:3]+1) * normals[...,3:] / 2 + torch.FloatTensor(((((0.5,0.5,1))))).to(self.device) * (1 - normals[...,3:])\n\t\t# normals = torch.cat([normal for normal in normals], dim=1)\n\t\tnormals = normals.clamp(0, 1)\n\t\treturn normals\n\n\n\t# Normalize absolute depth to inverse depth\n\[email protected]_grad()\n\tdef decode_normalized_depth(self, depths, batched_norm=False):\n\t\tview_z, mask = depths.unbind(-1)\n\t\tview_z = view_z * mask + 100 * (1-mask)\n\t\tinv_z = 1 / view_z\n\t\tinv_z_min = inv_z * mask + 100 * (1-mask)\n\t\tif not batched_norm:\n\t\t\tmax_ = torch.max(inv_z, 1, keepdim=True)\n\t\t\tmax_ = torch.max(max_[0], 2, keepdim=True)[0]\n\n\t\t\tmin_ = torch.min(inv_z_min, 1, keepdim=True)\n\t\t\tmin_ = torch.min(min_[0], 2, keepdim=True)[0]\n\t\telse:\n\t\t\tmax_ = torch.max(inv_z)\n\t\t\tmin_ = torch.min(inv_z_min)\n\t\tinv_z = (inv_z - min_) / (max_ - min_)\n\t\tinv_z = inv_z.clamp(0,1)\n\t\tinv_z = inv_z[...,None].repeat(1,1,1,3)\n\n\t\treturn inv_z\n\n\n\t# Multiple screen pixels could pass gradient to a same texel\n\t# We can precalculate this gradient strength and use it to normalize gradients when we bake textures\n\[email protected]_grad()\n\tdef calculate_tex_gradient(self, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\ttmp_mesh = self.mesh.clone()\n\t\tgradient_maps = []\n\t\tfor i in range(len(self.cameras)):\n\t\t\tzero_map = torch.zeros(self.target_size+(channels,), device=self.device, requires_grad=True)\n\t\t\toptimizer = torch.optim.SGD([zero_map], lr=1, momentum=0)\n\t\t\toptimizer.zero_grad()\n\t\t\tzero_tex = TexturesUV([zero_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), 
sampling_mode=self.sampling_mode)\n\t\t\ttmp_mesh.textures = zero_tex\n\t\t\timages_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights)\n\t\t\tloss = torch.sum((1 - images_predicted)**2)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\tgradient_maps.append(zero_map.detach())\n\n\t\tself.gradient_maps = gradient_maps\n\n\n\t# Get the UV space masks of triangles visible in each view\n\t# First get face ids from each view, then filter pixels on UV space to generate masks\n\[email protected]_grad()\n\tdef calculate_visible_triangle_mask(self, channels=None, image_size=(512,512)):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\n\t\tpix2face_list = []\n\t\tfor i in range(len(self.cameras)):\n\t\t\tself.renderer.rasterizer.raster_settings.image_size=image_size\n\t\t\tpix2face = self.renderer.rasterizer(self.mesh_d, cameras=self.cameras[i]).pix_to_face\n\t\t\tself.renderer.rasterizer.raster_settings.image_size=self.render_size\n\t\t\tpix2face_list.append(pix2face)\n\n\t\tif not hasattr(self, \"mesh_uv\"):\n\t\t\tself.construct_uv_mesh()\n\n\t\traster_settings = RasterizationSettings(\n\t\t\timage_size=self.target_size, \n\t\t\tblur_radius=0, \n\t\t\tfaces_per_pixel=1,\n\t\t\tperspective_correct=False,\n\t\t\tcull_backfaces=False,\n\t\t\tmax_faces_per_bin=30000,\n\t\t\t)\n\n\t\tR, T = look_at_view_transform(dist=2, elev=0, azim=0)\n\t\tcameras = FoVOrthographicCameras(device=self.device, R=R, T=T)\n\n\t\trasterizer=MeshRasterizer(\n\t\t\tcameras=cameras, \n\t\t\traster_settings=raster_settings\n\t\t)\n\t\tuv_pix2face = rasterizer(self.mesh_uv).pix_to_face\n\n\t\tvisible_triangles = []\n\t\tfor i in range(len(pix2face_list)):\n\t\t\tvalid_faceid = torch.unique(pix2face_list[i])\n\t\t\tvalid_faceid = valid_faceid[1:] if valid_faceid[0]==-1 else valid_faceid\n\t\t\tmask = torch.isin(uv_pix2face[0], valid_faceid, assume_unique=False)\n\t\t\t# uv_pix2face[0][~mask] = -1\n\t\t\ttriangle_mask = torch.ones(self.target_size+(1,), device=self.device)\n\t\t\ttriangle_mask[~mask] = 0\n\t\t\t\n\t\t\ttriangle_mask[:,1:][triangle_mask[:,:-1] > 0] = 1\n\t\t\ttriangle_mask[:,:-1][triangle_mask[:,1:] > 0] = 1\n\t\t\ttriangle_mask[1:,:][triangle_mask[:-1,:] > 0] = 1\n\t\t\ttriangle_mask[:-1,:][triangle_mask[1:,:] > 0] = 1\n\t\t\tvisible_triangles.append(triangle_mask)\n\n\t\tself.visible_triangles = visible_triangles\n\n\n\n\t# Render the current mesh and texture from current cameras\n\tdef render_textured_views(self):\n\t\tmeshes = self.mesh.extend(len(self.cameras))\n\t\timages_predicted = self.renderer(meshes, cameras=self.cameras, lights=self.lights)\n\n\t\treturn [image.permute(2, 0, 1) for image in images_predicted]\n\n\n\t# Bake views into a texture\n\t# First bake into individual textures then combine based on cosine weight\n\[email protected]_grad()\n\tdef bake_texture(self, views=None, main_views=[], cos_weighted=True, channels=None, exp=None, noisy=False, generator=None):\n\t\tif not exp:\n\t\t\texp=1\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\tviews = [view.permute(1, 2, 0) for view in views]\n\n\t\ttmp_mesh = self.mesh\n\t\tbake_maps = [torch.zeros(self.target_size+(views[0].shape[2],), device=self.device, requires_grad=True) for view in views]\n\t\toptimizer = torch.optim.SGD(bake_maps, lr=1, momentum=0)\n\t\toptimizer.zero_grad()\n\t\tloss = 0\n\t\tfor i in range(len(self.cameras)): \n\t\t\tbake_tex = TexturesUV([bake_maps[i]], tmp_mesh.textures.faces_uvs_padded(), tmp_mesh.textures.verts_uvs_padded(), 
sampling_mode=self.sampling_mode)\n\t\t\ttmp_mesh.textures = bake_tex\n\t\t\timages_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights, device=self.device)\n\t\t\tpredicted_rgb = images_predicted[..., :-1]\n\t\t\tloss += (((predicted_rgb[...] - views[i]))**2).sum()\n\t\tloss.backward(retain_graph=False)\n\t\toptimizer.step()\n\n\t\ttotal_weights = 0\n\t\tbaked = 0\n\t\tfor i in range(len(bake_maps)):\n\t\t\tnormalized_baked_map = bake_maps[i].detach() / (self.gradient_maps[i] + 1E-8)\n\t\t\tbake_map = voronoi_solve(normalized_baked_map, self.gradient_maps[i][...,0])\n\t\t\tweight = self.visible_triangles[i] * (self.cos_maps[i]) ** exp\n\t\t\tif noisy:\n\t\t\t\tnoise = torch.rand(weight.shape[:-1]+(1,), generator=generator).type(weight.dtype).to(weight.device)\n\t\t\t\tweight *= noise\n\t\t\ttotal_weights += weight\n\t\t\tbaked += bake_map * weight\n\t\tbaked /= total_weights + 1E-8\n\t\tbaked = voronoi_solve(baked, total_weights[...,0])\n\n\t\tbake_tex = TexturesUV([baked], tmp_mesh.textures.faces_uvs_padded(), tmp_mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode)\n\t\ttmp_mesh.textures = bake_tex\n\t\textended_mesh = tmp_mesh.extend(len(self.cameras))\n\t\timages_predicted = self.renderer(extended_mesh, cameras=self.cameras, lights=self.lights)\n\t\tlearned_views = [image.permute(2, 0, 1) for image in images_predicted]\n\n\t\treturn learned_views, baked.permute(2, 0, 1), total_weights.permute(2, 0, 1)\n\n\n\t# Move the internel data to a specific device\n\tdef to(self, device):\n\t\tfor mesh_name in [\"mesh\", \"mesh_d\", \"mesh_uv\"]:\n\t\t\tif hasattr(self, mesh_name):\n\t\t\t\tmesh = getattr(self, mesh_name)\n\t\t\t\tsetattr(self, mesh_name, mesh.to(device))\n\t\tfor list_name in [\"visible_triangles\", \"visibility_maps\", \"cos_maps\"]:\n\t\t\tif hasattr(self, list_name):\n\t\t\t\tmap_list = getattr(self, list_name)\n\t\t\t\tfor i in range(len(map_list)):\n\t\t\t\t\tmap_list[i] = map_list[i].to(device)" }, { "identifier": "SamplewiseAttnProcessor2_0", "path": "src/syncmvd/attention.py", "snippet": "class SamplewiseAttnProcessor2_0:\n\tr\"\"\"\n\tProcessor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).\n\t\"\"\"\n\n\tdef __init__(self, custom_attention_mask=None, ref_attention_mask=None, ref_weight=0):\n\t\tif not hasattr(F, \"scaled_dot_product_attention\"):\n\t\t\traise ImportError(\"AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.\")\n\t\tself.ref_weight = ref_weight\n\t\tself.custom_attention_mask = custom_attention_mask\n\t\tself.ref_attention_mask = ref_attention_mask\n\n\tdef __call__(\n\t\tself,\n\t\tattn: Attention,\n\t\thidden_states,\n\t\tencoder_hidden_states=None,\n\t\tattention_mask=None,\n\t\ttemb=None,\n\t):\n\n\t\tresidual = hidden_states\n\n\t\tif attn.spatial_norm is not None:\n\t\t\thidden_states = attn.spatial_norm(hidden_states, temb)\n\n\t\tinput_ndim = hidden_states.ndim\n\n\n\t\tif input_ndim == 4:\n\t\t\tbatch_size, channel, height, width = hidden_states.shape\n\t\t\thidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)\n\n\t\tbatch_size, sequence_length, channels = (\n\t\t\thidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape\n\t\t)\n\n\t\tif attention_mask is not None:\n\t\t\tattention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)\n\t\t\t# scaled_dot_product_attention expects attention_mask shape to be\n\t\t\t# (batch, heads, 
source_length, target_length)\n\t\t\tattention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])\n\n\t\tif attn.group_norm is not None:\n\t\t\thidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)\n\n\t\tquery = attn.to_q(hidden_states)\n\n\t\tif encoder_hidden_states is None:\n\t\t\tencoder_hidden_states = torch.clone(hidden_states)\n\t\telif attn.norm_cross:\n\t\t\tencoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)\n\n\n\t\t'''\n\t\t\treshape encoder hidden state to a single batch\n\t\t'''\n\t\tencoder_hidden_states_f = encoder_hidden_states.reshape(1, -1, channels)\n\n\n\n\t\tkey = attn.to_k(encoder_hidden_states)\n\t\tvalue = attn.to_v(encoder_hidden_states)\n\n\t\tinner_dim = key.shape[-1]\n\t\thead_dim = inner_dim // attn.heads\n\n\t\tquery = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)\n\n\t\t'''\n\t\t\teach time select 1 sample from q and compute with concated kv\n\t\t\tconcat result hidden states afterwards\n\t\t'''\n\t\thidden_state_list = []\n\n\t\tfor b_idx in range(batch_size):\n\t\t\t\n\t\t\tquery_b = query[b_idx:b_idx+1]\n\n\t\t\tif self.ref_weight > 0 or True:\n\t\t\t\tkey_ref = key.clone()\n\t\t\t\tvalue_ref = value.clone()\n\n\t\t\t\tkeys = [key_ref[view_idx] for view_idx in self.ref_attention_mask]\n\t\t\t\tvalues = [value_ref[view_idx] for view_idx in self.ref_attention_mask]\n\n\t\t\t\tkey_ref = torch.stack(keys)\n\t\t\t\tkey_ref = key_ref.view(key_ref.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\t\tvalue_ref = torch.stack(values)\n\t\t\t\tvalue_ref = value_ref.view(value_ref.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\tkey_a = key.clone()\n\t\t\tvalue_a = value.clone()\n\n\t\t\t# key_a = key_a[max(0,b_idx-1):min(b_idx+1,batch_size)+1]\n\n\t\t\tkeys = [key_a[view_idx] for view_idx in self.custom_attention_mask[b_idx]]\n\t\t\tvalues = [value_a[view_idx] for view_idx in self.custom_attention_mask[b_idx]]\n\n\t\t\t# keys = (key_a[b_idx-1], key_a[b_idx], key_a[(b_idx+1)%batch_size])\n\t\t\t# values = (value_a[b_idx-1], value_a[b_idx], value_a[(b_idx+1)%batch_size])\n\t\t\t\n\t\t\t# if b_idx not in [0, batch_size-1, batch_size//2]:\n\t\t\t# \tkeys = keys + (key_a[min(batch_size-2, 2*(batch_size//2) - b_idx)],)\n\t\t\t# \tvalues = values + (value_a[min(batch_size-2, 2*(batch_size//2) - b_idx)],)\n\t\t\tkey_a = torch.stack(keys)\n\t\t\tkey_a = key_a.view(key_a.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\t# value_a = value_a[max(0,b_idx-1):min(b_idx+1,batch_size)+1]\n\t\t\tvalue_a = torch.stack(values)\n\t\t\tvalue_a = value_a.view(value_a.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\thidden_state_a = F.scaled_dot_product_attention(\n\t\t\t\tquery_b, key_a, value_a, attn_mask=None, dropout_p=0.0, is_causal=False\n\t\t\t)\n\n\t\t\tif self.ref_weight > 0 or True:\n\t\t\t\thidden_state_ref = F.scaled_dot_product_attention(\n\t\t\t\t\tquery_b, key_ref, value_ref, attn_mask=None, dropout_p=0.0, is_causal=False\n\t\t\t\t)\n\n\t\t\t\thidden_state = (hidden_state_a + self.ref_weight * hidden_state_ref) / (1+self.ref_weight)\n\t\t\telse:\n\t\t\t\thidden_state = hidden_state_a\n\n\t\t\t# the output of sdp = (batch, num_heads, seq_len, head_dim)\n\t\t\t# TODO: add support for attn.scale when we 
move to Torch 2.1\n\t\t\t\n\t\t\thidden_state_list.append(hidden_state)\n\n\t\thidden_states = torch.cat(hidden_state_list)\n\n\n\t\thidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)\n\t\thidden_states = hidden_states.to(query.dtype)\n\n\t\t# linear proj\n\t\thidden_states = attn.to_out[0](hidden_states)\n\t\t# dropout\n\t\thidden_states = attn.to_out[1](hidden_states)\n\n\t\tif input_ndim == 4:\n\t\t\thidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)\n\n\t\tif attn.residual_connection:\n\t\t\thidden_states = hidden_states + residual\n\n\t\thidden_states = hidden_states / attn.rescale_output_factor\n\n\t\treturn hidden_states" }, { "identifier": "replace_attention_processors", "path": "src/syncmvd/attention.py", "snippet": "def replace_attention_processors(module, processor, attention_mask=None, ref_attention_mask=None, ref_weight=0):\n\tattn_processors = module.attn_processors\n\tfor k, v in attn_processors.items():\n\t\tif \"attn1\" in k:\n\t\t\tattn_processors[k] = processor(custom_attention_mask=attention_mask, ref_attention_mask=ref_attention_mask, ref_weight=ref_weight)\n\tmodule.set_attn_processor(attn_processors)" }, { "identifier": "step_tex", "path": "src/syncmvd/step.py", "snippet": "@torch.no_grad()\ndef step_tex(\n\t\tscheduler,\n\t\tuvp,\n\t\tmodel_output: torch.FloatTensor,\n\t\ttimestep: int,\n\t\tsample: torch.FloatTensor,\n\t\ttexture: None,\n\t\tgenerator=None,\n\t\treturn_dict: bool = True,\n\t\tguidance_scale = 1,\n\t\tmain_views = [],\n\t\thires_original_views = True,\n\t\texp=None,\n\t\tcos_weighted=True\n):\n\tt = timestep\n\n\tprev_t = scheduler.previous_timestep(t)\n\n\tif model_output.shape[1] == sample.shape[1] * 2 and scheduler.variance_type in [\"learned\", \"learned_range\"]:\n\t\tmodel_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)\n\telse:\n\t\tpredicted_variance = None\n\n\t# 1. compute alphas, betas\n\talpha_prod_t = scheduler.alphas_cumprod[t]\n\talpha_prod_t_prev = scheduler.alphas_cumprod[prev_t] if prev_t >= 0 else scheduler.one\n\tbeta_prod_t = 1 - alpha_prod_t\n\tbeta_prod_t_prev = 1 - alpha_prod_t_prev\n\tcurrent_alpha_t = alpha_prod_t / alpha_prod_t_prev\n\tcurrent_beta_t = 1 - current_alpha_t\n\n\t# 2. compute predicted original sample from predicted noise also called\n\t# \"predicted x_0\" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf\n\tif scheduler.config.prediction_type == \"epsilon\":\n\t\tpred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)\n\telif scheduler.config.prediction_type == \"sample\":\n\t\tpred_original_sample = model_output\n\telif scheduler.config.prediction_type == \"v_prediction\":\n\t\tpred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output\n\telse:\n\t\traise ValueError(\n\t\t\tf\"prediction_type given as {scheduler.config.prediction_type} must be one of `epsilon`, `sample` or\"\n\t\t\t\" `v_prediction` for the DDPMScheduler.\"\n\t\t)\n\n\t# 3. Clip or threshold \"predicted x_0\"\n\tif scheduler.config.thresholding:\n\t\tpred_original_sample = scheduler._threshold_sample(pred_original_sample)\n\telif scheduler.config.clip_sample:\n\t\tpred_original_sample = pred_original_sample.clamp(\n\t\t\t-scheduler.config.clip_sample_range, scheduler.config.clip_sample_range\n\t\t)\n\n\t# 4. 
Compute coefficients for pred_original_sample x_0 and current sample x_t\n\t# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf\n\tpred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t\n\tcurrent_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t\n\n\t'''\n\t\tAdd multidiffusion here\n\t'''\n\n\tif texture is None:\n\t\tsample_views = [view for view in sample]\n\t\tsample_views, texture, _ = uvp.bake_texture(views=sample_views, main_views=main_views, exp=exp)\n\t\tsample_views = torch.stack(sample_views, axis=0)[:,:-1,...]\n\n\n\toriginal_views = [view for view in pred_original_sample]\n\toriginal_views, original_tex, visibility_weights = uvp.bake_texture(views=original_views, main_views=main_views, exp=exp)\n\tuvp.set_texture_map(original_tex)\n\toriginal_views = uvp.render_textured_views()\n\toriginal_views = torch.stack(original_views, axis=0)[:,:-1,...]\n\n\t# 5. Compute predicted previous sample µ_t\n\t# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf\n\t# pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample\n\tprev_tex = pred_original_sample_coeff * original_tex + current_sample_coeff * texture\n\n\t# 6. Add noise\n\tvariance = 0\n\n\tif predicted_variance is not None:\n\t\tvariance_views = [view for view in predicted_variance]\n\t\tvariance_views, variance_tex, visibility_weights = uvp.bake_texture(views=variance_views, main_views=main_views, cos_weighted=cos_weighted, exp=exp)\n\t\tvariance_views = torch.stack(variance_views, axis=0)[:,:-1,...]\n\telse:\n\t\tvariance_tex = None\n\n\tif t > 0:\n\t\tdevice = texture.device\n\t\tvariance_noise = randn_tensor(\n\t\t\ttexture.shape, generator=generator, device=device, dtype=texture.dtype\n\t\t)\n\t\tif scheduler.variance_type == \"fixed_small_log\":\n\t\t\tvariance = scheduler._get_variance(t, predicted_variance=variance_tex) * variance_noise\n\t\telif scheduler.variance_type == \"learned_range\":\n\t\t\tvariance = scheduler._get_variance(t, predicted_variance=variance_tex)\n\t\t\tvariance = torch.exp(0.5 * variance) * variance_noise\n\t\telse:\n\t\t\tvariance = (scheduler._get_variance(t, predicted_variance=variance_tex) ** 0.5) * variance_noise\n\n\tprev_tex = prev_tex + variance\n\n\tuvp.set_texture_map(prev_tex)\n\tprev_views = uvp.render_textured_views()\n\tpred_prev_sample = torch.clone(sample)\n\tfor i, view in enumerate(prev_views):\n\t\tpred_prev_sample[i] = view[:-1]\n\tmasks = [view[-1:] for view in prev_views]\n\n\treturn {\"prev_sample\": pred_prev_sample, \"pred_original_sample\":pred_original_sample, \"prev_tex\": prev_tex}\n\n\tif not return_dict:\n\t\treturn pred_prev_sample, pred_original_sample\n\tpass" } ]
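The step_tex snippet above is the standard DDPM posterior mean, formula (7) of the paper its comments cite (https://arxiv.org/pdf/2006.11239.pdf), evaluated in UV space: the per-view model outputs are first baked into textures, and the two coefficients weight the baked x_0 estimate (original_tex) and the current noisy texture instead of the screen-space latents,

\[ \tilde{\mu}_t \;=\; \frac{\sqrt{\bar{\alpha}_{t-1}}\,\beta_t}{1-\bar{\alpha}_t}\,\hat{x}_0 \;+\; \frac{\sqrt{\alpha_t}\,\bigl(1-\bar{\alpha}_{t-1}\bigr)}{1-\bar{\alpha}_t}\,x_t , \]

after which the updated texture is re-rendered so that each view's previous sample stays consistent with the shared texture.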
import os
import numpy as np
import math
import random
import torch
import select
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from PIL import Image
from IPython.display import display
from torch import functional as F
from torch import nn
from torchvision.transforms import Compose, Resize, GaussianBlur, InterpolationMode
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers import DDPMScheduler, DDIMScheduler, UniPCMultistepScheduler
from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import (
    BaseOutput,
    randn_tensor,
    numpy_to_pil,
    pt_to_pil,
    # make_image_grid,
    is_accelerate_available,
    is_accelerate_version,
    is_compiled_module,
    logging,
    randn_tensor,
    replace_example_docstring
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.models.attention_processor import Attention, AttentionProcessor
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from .renderer.project import UVProjection as UVP
from .syncmvd.attention import SamplewiseAttnProcessor2_0, replace_attention_processors
from .syncmvd.prompt import *
from .syncmvd.step import step_tex
from .utils import *
11,978
# 7.1 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order intermediate_results = [] background_colors = [random.choice(list(color_constants.keys())) for i in range(len(self.camera_poses))] dbres_sizes_list = [] mbres_size_list = [] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # mix prompt embeds according to azim angle positive_prompt_embeds = [azim_prompt(prompt_embed_dict, pose) for pose in self.camera_poses] positive_prompt_embeds = torch.stack(positive_prompt_embeds, axis=0) negative_prompt_embeds = [azim_neg_prompt(negative_prompt_embed_dict, pose) for pose in self.camera_poses] negative_prompt_embeds = torch.stack(negative_prompt_embeds, axis=0) # expand the latents if we are doing classifier free guidance latent_model_input = self.scheduler.scale_model_input(latents, t) ''' Use groups to manage prompt and results Make sure negative and positive prompt does not perform attention together ''' prompt_embeds_groups = {"positive": positive_prompt_embeds} result_groups = {} if do_classifier_free_guidance: prompt_embeds_groups["negative"] = negative_prompt_embeds for prompt_tag, prompt_embeds in prompt_embeds_groups.items(): if prompt_tag == "positive" or not guess_mode: # controlnet(s) inference control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] # Split into micro-batches according to group meta info # Ignore this feature for now down_block_res_samples_list = [] mid_block_res_sample_list = [] model_input_batches = [torch.index_select(control_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(controlnet_prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] conditioning_images_batches = [torch.index_select(conditioning_images, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch ,prompt_embeds_batch, conditioning_images_batch \ in zip (model_input_batches, prompt_embeds_batches, conditioning_images_batches): down_block_res_samples, mid_block_res_sample = self.controlnet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, controlnet_cond=conditioning_images_batch, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) down_block_res_samples_list.append(down_block_res_samples) mid_block_res_sample_list.append(mid_block_res_sample) ''' For the ith element of down_block_res_samples, concat the ith element of all mini-batch result ''' model_input_batches = prompt_embeds_batches = conditioning_images_batches = None if guess_mode: for dbres in down_block_res_samples_list: dbres_sizes = [] for res in dbres: dbres_sizes.append(res.shape) 
dbres_sizes_list.append(dbres_sizes) for mbres in mid_block_res_sample_list: mbres_size_list.append(mbres.shape) else: # Infered ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. # We copy the tensor shapes from a conditional batch down_block_res_samples_list = [] mid_block_res_sample_list = [] for dbres_sizes in dbres_sizes_list: down_block_res_samples_list.append([torch.zeros(shape, device=self._execution_device, dtype=latents.dtype) for shape in dbres_sizes]) for mbres in mbres_size_list: mid_block_res_sample_list.append(torch.zeros(mbres, device=self._execution_device, dtype=latents.dtype)) dbres_sizes_list = [] mbres_size_list = [] ''' predict the noise residual, split into mini-batches Downblock res samples has n samples, we split each sample into m batches and re group them into m lists of n mini batch samples. ''' noise_pred_list = [] model_input_batches = [torch.index_select(latent_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch, prompt_embeds_batch, down_block_res_samples_batch, mid_block_res_sample_batch, meta \ in zip(model_input_batches, prompt_embeds_batches, down_block_res_samples_list, mid_block_res_sample_list, self.group_metas): if t > num_timesteps * (1- ref_attention_end):
if torch.cuda.is_available(): device = torch.device("cuda:0") torch.cuda.set_device(device) else: device = torch.device("cpu") # Background colors color_constants = {"black": [-1, -1, -1], "white": [1, 1, 1], "maroon": [0, -1, -1], "red": [1, -1, -1], "olive": [0, 0, -1], "yellow": [1, 1, -1], "green": [-1, 0, -1], "lime": [-1 ,1, -1], "teal": [-1, 0, 0], "aqua": [-1, 1, 1], "navy": [-1, -1, 0], "blue": [-1, -1, 1], "purple": [0, -1 , 0], "fuchsia": [1, -1, 1]} color_names = list(color_constants.keys()) # Used to generate depth or normal conditioning images @torch.no_grad() def get_conditioning_images(uvp, output_size, render_size=512, blur_filter=5, cond_type="normal"): verts, normals, depths, cos_maps, texels, fragments = uvp.render_geometry(image_size=render_size) masks = normals[...,3][:,None,...] masks = Resize((output_size//8,)*2, antialias=True)(masks) normals_transforms = Compose([ Resize((output_size,)*2, interpolation=InterpolationMode.BILINEAR, antialias=True), GaussianBlur(blur_filter, blur_filter//3+1)] ) if cond_type == "normal": view_normals = uvp.decode_view_normal(normals).permute(0,3,1,2) *2 - 1 conditional_images = normals_transforms(view_normals) # Some problem here, depth controlnet don't work when depth is normalized # But it do generate using the unnormalized form as below elif cond_type == "depth": view_depths = uvp.decode_normalized_depth(depths).permute(0,3,1,2) conditional_images = normals_transforms(view_depths) return conditional_images, masks # Revert time 0 background to time t to composite with time t foreground @torch.no_grad() def composite_rendered_view(scheduler, backgrounds, foregrounds, masks, t): composited_images = [] for i, (background, foreground, mask) in enumerate(zip(backgrounds, foregrounds, masks)): if t > 0: alphas_cumprod = scheduler.alphas_cumprod[t] noise = torch.normal(0, 1, background.shape, device=background.device) background = (1-alphas_cumprod) * noise + alphas_cumprod * background composited = foreground * mask + background * (1-mask) composited_images.append(composited) composited_tensor = torch.stack(composited_images) return composited_tensor # Split into micro-batches to use less memory in each unet prediction # But need more investigation on reducing memory usage # Assume it has no possitive effect and use a large "max_batch_size" to skip splitting def split_groups(attention_mask, max_batch_size, ref_view=[]): group_sets = [] group = set() ref_group = set() idx = 0 while idx < len(attention_mask): new_group = group | set([idx]) new_ref_group = (ref_group | set(attention_mask[idx] + ref_view)) - new_group if len(new_group) + len(new_ref_group) <= max_batch_size: group = new_group ref_group = new_ref_group idx += 1 else: assert len(group) != 0, "Cannot fit into a group" group_sets.append((group, ref_group)) group = set() ref_group = set() if len(group)>0: group_sets.append((group, ref_group)) group_metas = [] for group, ref_group in group_sets: in_mask = sorted(list(group | ref_group)) out_mask = [] group_attention_masks = [] for idx in in_mask: if idx in group: out_mask.append(in_mask.index(idx)) group_attention_masks.append([in_mask.index(idxx) for idxx in attention_mask[idx] if idxx in in_mask]) ref_attention_mask = [in_mask.index(idx) for idx in ref_view] group_metas.append([in_mask, out_mask, group_attention_masks, ref_attention_mask]) return group_metas ''' MultiView-Diffusion Stable-Diffusion Pipeline Modified from a Diffusers StableDiffusionControlNetPipeline Just mimic the pipeline structure but did not follow any API 
convention ''' class StableSyncMVDPipeline(StableDiffusionControlNetPipeline): def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel]], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = False, ): super().__init__( vae, text_encoder, tokenizer, unet, controlnet, scheduler, safety_checker, feature_extractor, requires_safety_checker ) self.scheduler = DDPMScheduler.from_config(self.scheduler.config) self.model_cpu_offload_seq = "vae->text_encoder->unet->vae" self.enable_model_cpu_offload() self.enable_vae_slicing() self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def initialize_pipeline( self, mesh_path=None, mesh_transform=None, mesh_autouv=None, camera_azims=None, camera_centers=None, top_cameras=True, ref_views=[], latent_size=None, render_rgb_size=None, texture_size=None, texture_rgb_size=None, max_batch_size=24, logging_config=None, ): # Make output dir output_dir = logging_config["output_dir"] self.result_dir = f"{output_dir}/results" self.intermediate_dir = f"{output_dir}/intermediate" dirs = [output_dir, self.result_dir, self.intermediate_dir] for dir_ in dirs: if not os.path.isdir(dir_): os.mkdir(dir_) # Define the cameras for rendering self.camera_poses = [] self.attention_mask=[] self.centers = camera_centers cam_count = len(camera_azims) front_view_diff = 360 back_view_diff = 360 front_view_idx = 0 back_view_idx = 0 for i, azim in enumerate(camera_azims): if azim < 0: azim += 360 self.camera_poses.append((0, azim)) self.attention_mask.append([(cam_count+i-1)%cam_count, i, (i+1)%cam_count]) if abs(azim) < front_view_diff: front_view_idx = i front_view_diff = abs(azim) if abs(azim - 180) < back_view_diff: back_view_idx = i back_view_diff = abs(azim - 180) # Add two additional cameras for painting the top surfaces if top_cameras: self.camera_poses.append((30, 0)) self.camera_poses.append((30, 180)) self.attention_mask.append([front_view_idx, cam_count]) self.attention_mask.append([back_view_idx, cam_count+1]) # Reference view for attention (all views attend the the views in this list) # A forward view will be used if not specified if len(ref_views) == 0: ref_views = [front_view_idx] # Calculate in-group attention mask self.group_metas = split_groups(self.attention_mask, max_batch_size, ref_views) # Set up pytorch3D for projection between screen space and UV space # uvp is for latent and uvp_rgb for rgb color self.uvp = UVP(texture_size=texture_size, render_size=latent_size, sampling_mode="nearest", channels=4, device=self._execution_device) if mesh_path.lower().endswith(".obj"): self.uvp.load_mesh(mesh_path, scale_factor=mesh_transform["scale"] or 1, autouv=mesh_autouv) elif mesh_path.lower().endswith(".glb"): self.uvp.load_glb_mesh(mesh_path, scale_factor=mesh_transform["scale"] or 1, autouv=mesh_autouv) else: assert False, "The mesh file format is not supported. Use .obj or .glb." 
self.uvp.set_cameras_and_render_settings(self.camera_poses, centers=camera_centers, camera_distance=4.0) self.uvp_rgb = UVP(texture_size=texture_rgb_size, render_size=render_rgb_size, sampling_mode="nearest", channels=3, device=self._execution_device) self.uvp_rgb.mesh = self.uvp.mesh.clone() self.uvp_rgb.set_cameras_and_render_settings(self.camera_poses, centers=camera_centers, camera_distance=4.0) _,_,_,cos_maps,_, _ = self.uvp_rgb.render_geometry() self.uvp_rgb.calculate_cos_angle_weights(cos_maps, fill=False) # Save some VRAM del _, cos_maps self.uvp.to("cpu") self.uvp_rgb.to("cpu") color_images = torch.FloatTensor([color_constants[name] for name in color_names]).reshape(-1,3,1,1).to(dtype=self.text_encoder.dtype, device=self._execution_device) color_images = torch.ones( (1,1,latent_size*8, latent_size*8), device=self._execution_device, dtype=self.text_encoder.dtype ) * color_images color_images *= ((0.5*color_images)+0.5) color_latents = encode_latents(self.vae, color_images) self.color_latents = {color[0]:color[1] for color in zip(color_names, [latent for latent in color_latents])} self.vae = self.vae.to("cpu") print("Done Initialization") ''' Modified from a StableDiffusion ControlNet pipeline Multi ControlNet not supported yet ''' @torch.no_grad() def __call__( self, prompt: str = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: str = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, return_dict: bool = False, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, max_batch_size=6, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_guess_mode: bool = False, controlnet_conditioning_scale: Union[float, List[float]] = 0.7, controlnet_conditioning_end_scale: Union[float, List[float]] = 0.9, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 0.99, guidance_rescale: float = 0.0, mesh_path: str = None, mesh_transform: dict = None, mesh_autouv = False, camera_azims=None, camera_centers=None, top_cameras=True, texture_size = 1536, render_rgb_size=1024, texture_rgb_size = 1024, multiview_diffusion_end=0.8, shuffle_background_change=0.4, shuffle_background_end=0.99, #0.4 use_directional_prompt=True, ref_attention_end=0.2, logging_config=None, cond_type="depth", ): # Setup pipeline settings self.initialize_pipeline( mesh_path=mesh_path, mesh_transform=mesh_transform, mesh_autouv=mesh_autouv, camera_azims=camera_azims, camera_centers=camera_centers, top_cameras=top_cameras, ref_views=[], latent_size=height//8, render_rgb_size=render_rgb_size, texture_size=texture_size, texture_rgb_size=texture_rgb_size, max_batch_size=max_batch_size, logging_config=logging_config ) num_timesteps = self.scheduler.config.num_train_timesteps initial_controlnet_conditioning_scale = controlnet_conditioning_scale log_interval = logging_config.get("log_interval", 10) view_fast_preview = logging_config.get("view_fast_preview", True) tex_fast_preview = logging_config.get("tex_fast_preview", True) controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not 
isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): # mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 mult = 1 control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [ control_guidance_end ] # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, torch.zeros((1,3,height,width), device=self._execution_device), callback_steps, negative_prompt, None, None, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, list): assert len(prompt) == 1 and len(negative_prompt) == 1, "Only implemented for 1 (negative) prompt" assert num_images_per_prompt == 1, "Only implemented for 1 image per-prompt" batch_size = len(self.uvp.cameras) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): # controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = controlnet_guess_mode or global_pool_conditions # 3. Encode input prompt prompt, negative_prompt = prepare_directional_prompt(prompt, negative_prompt) text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=None, negative_prompt_embeds=None, lora_scale=text_encoder_lora_scale, ) negative_prompt_embeds, prompt_embeds = torch.chunk(prompt_embeds, 2) prompt_embed_dict = dict(zip(direction_names, [emb for emb in prompt_embeds])) negative_prompt_embed_dict = dict(zip(direction_names, [emb for emb in negative_prompt_embeds])) # (4. Prepare image) This pipeline use internal conditional images from Pytorch3D self.uvp.to(self._execution_device) conditioning_images, masks = get_conditioning_images(self.uvp, height, cond_type=cond_type) conditioning_images = conditioning_images.type(prompt_embeds.dtype) cond = (conditioning_images/2+0.5).permute(0,2,3,1).cpu().numpy() cond = np.concatenate([img for img in cond], axis=1) numpy_to_pil(cond)[0].save(f"{self.intermediate_dir}/cond.jpg") # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 6. 
Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, None, ) latent_tex = self.uvp.set_noise_texture() noise_views = self.uvp.render_textured_views() foregrounds = [view[:-1] for view in noise_views] masks = [view[-1:] for view in noise_views] composited_tensor = composite_rendered_view(self.scheduler, latents, foregrounds, masks, timesteps[0]+1) latents = composited_tensor.type(latents.dtype) self.uvp.to("cpu") # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order intermediate_results = [] background_colors = [random.choice(list(color_constants.keys())) for i in range(len(self.camera_poses))] dbres_sizes_list = [] mbres_size_list = [] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # mix prompt embeds according to azim angle positive_prompt_embeds = [azim_prompt(prompt_embed_dict, pose) for pose in self.camera_poses] positive_prompt_embeds = torch.stack(positive_prompt_embeds, axis=0) negative_prompt_embeds = [azim_neg_prompt(negative_prompt_embed_dict, pose) for pose in self.camera_poses] negative_prompt_embeds = torch.stack(negative_prompt_embeds, axis=0) # expand the latents if we are doing classifier free guidance latent_model_input = self.scheduler.scale_model_input(latents, t) ''' Use groups to manage prompt and results Make sure negative and positive prompt does not perform attention together ''' prompt_embeds_groups = {"positive": positive_prompt_embeds} result_groups = {} if do_classifier_free_guidance: prompt_embeds_groups["negative"] = negative_prompt_embeds for prompt_tag, prompt_embeds in prompt_embeds_groups.items(): if prompt_tag == "positive" or not guess_mode: # controlnet(s) inference control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] # Split into micro-batches according to group meta info # Ignore this feature for now down_block_res_samples_list = [] mid_block_res_sample_list = [] model_input_batches = [torch.index_select(control_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(controlnet_prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] conditioning_images_batches = [torch.index_select(conditioning_images, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch ,prompt_embeds_batch, conditioning_images_batch \ in zip (model_input_batches, 
prompt_embeds_batches, conditioning_images_batches): down_block_res_samples, mid_block_res_sample = self.controlnet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, controlnet_cond=conditioning_images_batch, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) down_block_res_samples_list.append(down_block_res_samples) mid_block_res_sample_list.append(mid_block_res_sample) ''' For the ith element of down_block_res_samples, concat the ith element of all mini-batch result ''' model_input_batches = prompt_embeds_batches = conditioning_images_batches = None if guess_mode: for dbres in down_block_res_samples_list: dbres_sizes = [] for res in dbres: dbres_sizes.append(res.shape) dbres_sizes_list.append(dbres_sizes) for mbres in mid_block_res_sample_list: mbres_size_list.append(mbres.shape) else: # Infered ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. # We copy the tensor shapes from a conditional batch down_block_res_samples_list = [] mid_block_res_sample_list = [] for dbres_sizes in dbres_sizes_list: down_block_res_samples_list.append([torch.zeros(shape, device=self._execution_device, dtype=latents.dtype) for shape in dbres_sizes]) for mbres in mbres_size_list: mid_block_res_sample_list.append(torch.zeros(mbres, device=self._execution_device, dtype=latents.dtype)) dbres_sizes_list = [] mbres_size_list = [] ''' predict the noise residual, split into mini-batches Downblock res samples has n samples, we split each sample into m batches and re group them into m lists of n mini batch samples. ''' noise_pred_list = [] model_input_batches = [torch.index_select(latent_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch, prompt_embeds_batch, down_block_res_samples_batch, mid_block_res_sample_batch, meta \ in zip(model_input_batches, prompt_embeds_batches, down_block_res_samples_list, mid_block_res_sample_list, self.group_metas): if t > num_timesteps * (1- ref_attention_end):
replace_attention_processors(self.unet, SamplewiseAttnProcessor2_0, attention_mask=meta[2], ref_attention_mask=meta[3], ref_weight=1)
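While ref_attention_end is still active (t > T·(1 − ref_attention_end), with T the number of training timesteps), this call installs SamplewiseAttnProcessor2_0, which for each view b blends attention over its neighbouring views (meta[2]) with attention over the reference views (meta[3]):

\[ h_b \;=\; \frac{\operatorname{Attn}\bigl(q_b, K_{\mathrm{nbr}(b)}, V_{\mathrm{nbr}(b)}\bigr) \;+\; w_{\mathrm{ref}}\,\operatorname{Attn}\bigl(q_b, K_{\mathrm{ref}}, V_{\mathrm{ref}}\bigr)}{1 + w_{\mathrm{ref}}} , \]

so the ref_weight=1 passed here averages the two terms equally; the subscripts nbr and ref are shorthand introduced only for this summary.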
2
2023-12-09 03:27:58+00:00
16k
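Since every record also carries gold_snippet_index (presumably the position in context of the snippet the reference completion depends on), a simple, illustrative way to score predictions against these records is sketched below; the function names and the exact-match metric are assumptions, not something defined by the dataset.

def exact_match_rate(records, predictions):
    # records: dataset rows as dicts; predictions: one predicted line of code per row.
    hits = sum(
        pred.strip() == rec["next_line"].strip()
        for rec, pred in zip(records, predictions)
    )
    return hits / max(len(records), 1)

def gold_snippet(rec):
    # The context entry indexed by gold_snippet_index, i.e. the cross-file definition
    # the reference next_line is presumed to rely on.
    return rec["context"][rec["gold_snippet_index"]]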